// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package scrape

import (
	"bytes"
	"compress/gzip"
	"context"
	"fmt"
	"io"
	"math"
	"net/http"
	"net/http/httptest"
	"net/url"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/go-kit/log"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	config_util "github.com/prometheus/common/config"
	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"
	"github.com/prometheus/prometheus/model/textparse"
	"github.com/prometheus/prometheus/model/timestamp"
	"github.com/prometheus/prometheus/model/value"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/util/teststorage"
	"github.com/prometheus/prometheus/util/testutil"
)

func TestMain(m *testing.M) {
	testutil.TolerantVerifyLeak(m)
}

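// TestNewScrapePool verifies that a freshly constructed pool is wired up with
// the given appendable and config, and that its newLoop constructor is set.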
func TestNewScrapePool(t *testing.T) {
	var (
		app   = &nopAppendable{}
		cfg   = &config.ScrapeConfig{}
		sp, _ = newScrapePool(cfg, app, 0, nil, &Options{})
	)

	if a, ok := sp.appendable.(*nopAppendable); !ok || a != app {
		t.Fatalf("Wrong sample appender")
	}
	if sp.config != cfg {
		t.Fatalf("Wrong scrape config")
	}
	if sp.newLoop == nil {
		t.Fatalf("newLoop function not initialized")
	}
}

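// TestDroppedTargetsList checks that a target dropped by relabeling shows up
// exactly once in droppedTargets, even when the same group is synced twice.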
func TestDroppedTargetsList(t *testing.T) {
	var (
		app = &nopAppendable{}
		cfg = &config.ScrapeConfig{
			JobName:        "dropMe",
			ScrapeInterval: model.Duration(1),
			RelabelConfigs: []*relabel.Config{
				{
					Action:       relabel.Drop,
					Regex:        relabel.MustNewRegexp("dropMe"),
					SourceLabels: model.LabelNames{"job"},
				},
			},
		}
		tgs = []*targetgroup.Group{
			{
				Targets: []model.LabelSet{
					{model.AddressLabel: "127.0.0.1:9090"},
				},
			},
		}
		sp, _                  = newScrapePool(cfg, app, 0, nil, &Options{})
		expectedLabelSetString = "{__address__=\"127.0.0.1:9090\", __scrape_interval__=\"0s\", __scrape_timeout__=\"0s\", job=\"dropMe\"}"
		expectedLength         = 1
	)
	sp.Sync(tgs)
	sp.Sync(tgs)
	if len(sp.droppedTargets) != expectedLength {
		t.Fatalf("Length of dropped targets exceeded expected length, expected %v, got %v", expectedLength, len(sp.droppedTargets))
	}
	if sp.droppedTargets[0].DiscoveredLabels().String() != expectedLabelSetString {
		t.Fatalf("Got %v, expected %v", sp.droppedTargets[0].DiscoveredLabels().String(), expectedLabelSetString)
	}
}

// TestDiscoveredLabelsUpdate checks that DiscoveredLabels are updated
// even when new labels don't affect the target `hash`.
func TestDiscoveredLabelsUpdate(t *testing.T) {
	sp := &scrapePool{}
	// These are used when syncing so need this to avoid a panic.
	sp.config = &config.ScrapeConfig{
		ScrapeInterval: model.Duration(1),
		ScrapeTimeout:  model.Duration(1),
	}
	sp.activeTargets = make(map[uint64]*Target)
	t1 := &Target{
		discoveredLabels: labels.FromStrings("label", "name"),
	}
	sp.activeTargets[t1.hash()] = t1

	t2 := &Target{
		discoveredLabels: labels.FromStrings("labelNew", "nameNew"),
	}
	sp.sync([]*Target{t2})

	require.Equal(t, t2.DiscoveredLabels(), sp.activeTargets[t1.hash()].DiscoveredLabels())
}

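// testLoop is a minimal stub implementation of the loop interface that lets
// tests observe start/stop calls and inject a forced error.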
type testLoop struct {
	startFunc    func(interval, timeout time.Duration, errc chan<- error)
	stopFunc     func()
	forcedErr    error
	forcedErrMtx sync.Mutex
	runOnce      bool
	interval     time.Duration
	timeout      time.Duration
}

func (l *testLoop) run(errc chan<- error) {
	if l.runOnce {
		panic("loop must be started only once")
	}
	l.runOnce = true
	l.startFunc(l.interval, l.timeout, errc)
}

func (l *testLoop) disableEndOfRunStalenessMarkers() {
}

func (l *testLoop) setForcedError(err error) {
	l.forcedErrMtx.Lock()
	defer l.forcedErrMtx.Unlock()
	l.forcedErr = err
}

func (l *testLoop) getForcedError() error {
	l.forcedErrMtx.Lock()
	defer l.forcedErrMtx.Unlock()
	return l.forcedErr
}

func (l *testLoop) stop() {
	l.stopFunc()
}

func (l *testLoop) getCache() *scrapeCache {
	return nil
}

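// TestScrapePoolStop starts twenty loops whose stop functions sleep for
// staggered durations, then asserts that scrapePool.stop blocks until every
// loop has stopped and that targets and loops are cleared afterwards.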
func TestScrapePoolStop(t *testing.T) {
	sp := &scrapePool{
		activeTargets: map[uint64]*Target{},
		loops:         map[uint64]loop{},
		cancel:        func() {},
		client:        http.DefaultClient,
	}
	var mtx sync.Mutex
	stopped := map[uint64]bool{}
	numTargets := 20

	// Stopping the scrape pool must call stop() on all scrape loops,
	// clean them and the respective targets up. It must wait until each loop's
	// stop function returned before returning itself.

	for i := 0; i < numTargets; i++ {
		t := &Target{
			labels: labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i)),
		}
		l := &testLoop{}
		d := time.Duration((i+1)*20) * time.Millisecond
		l.stopFunc = func() {
			time.Sleep(d)

			mtx.Lock()
			stopped[t.hash()] = true
			mtx.Unlock()
		}

		sp.activeTargets[t.hash()] = t
		sp.loops[t.hash()] = l
	}

	done := make(chan struct{})
	stopTime := time.Now()

	go func() {
		sp.stop()
		close(done)
	}()

	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("scrapeLoop.stop() did not return as expected")
	case <-done:
		// This should have taken at least as long as the last target slept.
		if time.Since(stopTime) < time.Duration(numTargets*20)*time.Millisecond {
			t.Fatalf("scrapeLoop.stop() exited before all targets stopped")
		}
	}

	mtx.Lock()
	require.Equal(t, numTargets, len(stopped), "Unexpected number of stopped loops")
	mtx.Unlock()

	require.Equal(t, 0, len(sp.activeTargets), "Targets were not cleared on stopping: %d left", len(sp.activeTargets))
	require.Equal(t, 0, len(sp.loops), "Loops were not cleared on stopping: %d left", len(sp.loops))
}

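// TestScrapePoolReload swaps in a new scrape config and asserts that every
// old loop is stopped before its replacement starts, and that the set of
// active targets is left untouched by the reload.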
func TestScrapePoolReload(t *testing.T) {
	var mtx sync.Mutex
	numTargets := 20

	stopped := map[uint64]bool{}

	reloadCfg := &config.ScrapeConfig{
		ScrapeInterval: model.Duration(3 * time.Second),
		ScrapeTimeout:  model.Duration(2 * time.Second),
	}
	// On starting to run, new loops created on reload check whether their preceding
	// equivalents have been stopped.
	newLoop := func(opts scrapeLoopOptions) loop {
		l := &testLoop{interval: time.Duration(reloadCfg.ScrapeInterval), timeout: time.Duration(reloadCfg.ScrapeTimeout)}
		l.startFunc = func(interval, timeout time.Duration, errc chan<- error) {
			require.Equal(t, 3*time.Second, interval, "Unexpected scrape interval")
			require.Equal(t, 2*time.Second, timeout, "Unexpected scrape timeout")

			mtx.Lock()
			targetScraper := opts.scraper.(*targetScraper)
			require.True(t, stopped[targetScraper.hash()], "Scrape loop for %v not stopped yet", targetScraper)
			mtx.Unlock()
		}
		return l
	}
	sp := &scrapePool{
		appendable:    &nopAppendable{},
		activeTargets: map[uint64]*Target{},
		loops:         map[uint64]loop{},
		newLoop:       newLoop,
		logger:        nil,
		client:        http.DefaultClient,
	}

	// Reloading a scrape pool with a new scrape configuration must stop all scrape
	// loops and start new ones. A new loop must not be started before the preceding
	// one terminated.

	for i := 0; i < numTargets; i++ {
		labels := labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i))
		t := &Target{
			labels:           labels,
			discoveredLabels: labels,
		}
		l := &testLoop{}
		d := time.Duration((i+1)*20) * time.Millisecond
		l.stopFunc = func() {
			time.Sleep(d)

			mtx.Lock()
			stopped[t.hash()] = true
			mtx.Unlock()
		}

		sp.activeTargets[t.hash()] = t
		sp.loops[t.hash()] = l
	}
	done := make(chan struct{})

	beforeTargets := map[uint64]*Target{}
	for h, t := range sp.activeTargets {
		beforeTargets[h] = t
	}

	reloadTime := time.Now()

	go func() {
		sp.reload(reloadCfg)
		close(done)
	}()

	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("scrapeLoop.reload() did not return as expected")
	case <-done:
		// This should have taken at least as long as the last target slept.
		if time.Since(reloadTime) < time.Duration(numTargets*20)*time.Millisecond {
			t.Fatalf("scrapeLoop.stop() exited before all targets stopped")
		}
	}

	mtx.Lock()
	require.Equal(t, numTargets, len(stopped), "Unexpected number of stopped loops")
	mtx.Unlock()

	require.Equal(t, sp.activeTargets, beforeTargets, "Reloading affected target states unexpectedly")
	require.Equal(t, numTargets, len(sp.loops), "Unexpected number of stopped loops after reload")
}

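// TestScrapePoolReloadPreserveRelabeledIntervalTimeout checks that interval
// and timeout values set per target via the __scrape_interval__ and
// __scrape_timeout__ labels survive a reload of the pool-level config.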
func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) {
	reloadCfg := &config.ScrapeConfig{
		ScrapeInterval: model.Duration(3 * time.Second),
		ScrapeTimeout:  model.Duration(2 * time.Second),
	}
	newLoop := func(opts scrapeLoopOptions) loop {
		l := &testLoop{interval: opts.interval, timeout: opts.timeout}
		l.startFunc = func(interval, timeout time.Duration, errc chan<- error) {
			require.Equal(t, 5*time.Second, interval, "Unexpected scrape interval")
			require.Equal(t, 3*time.Second, timeout, "Unexpected scrape timeout")
		}
		return l
	}
	sp := &scrapePool{
		appendable: &nopAppendable{},
		activeTargets: map[uint64]*Target{
			1: {
				labels: labels.FromStrings(model.ScrapeIntervalLabel, "5s", model.ScrapeTimeoutLabel, "3s"),
			},
		},
		loops: map[uint64]loop{
			1: noopLoop(),
		},
		newLoop: newLoop,
		logger:  nil,
		client:  http.DefaultClient,
	}

	err := sp.reload(reloadCfg)
	if err != nil {
		t.Fatalf("unable to reload configuration: %s", err)
	}
}

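// TestScrapePoolTargetLimit drives the pool through a series of target counts
// and target_limit settings and checks that loops receive a forced
// "target_limit exceeded" error exactly when the count exceeds the limit.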
func TestScrapePoolTargetLimit(t *testing.T) {
	var wg sync.WaitGroup
	// On starting to run, new loops created on reload check whether their preceding
	// equivalents have been stopped.
	newLoop := func(opts scrapeLoopOptions) loop {
		wg.Add(1)
		l := &testLoop{
			startFunc: func(interval, timeout time.Duration, errc chan<- error) {
				wg.Done()
			},
			stopFunc: func() {},
		}
		return l
	}
	sp := &scrapePool{
		appendable:    &nopAppendable{},
		activeTargets: map[uint64]*Target{},
		loops:         map[uint64]loop{},
		newLoop:       newLoop,
		logger:        log.NewNopLogger(),
		client:        http.DefaultClient,
	}

	tgs := []*targetgroup.Group{}
	for i := 0; i < 50; i++ {
		tgs = append(tgs,
			&targetgroup.Group{
				Targets: []model.LabelSet{
					{model.AddressLabel: model.LabelValue(fmt.Sprintf("127.0.0.1:%d", 9090+i))},
				},
			},
		)
	}

	var limit uint
	reloadWithLimit := func(l uint) {
		limit = l
		require.NoError(t, sp.reload(&config.ScrapeConfig{
			ScrapeInterval: model.Duration(3 * time.Second),
			ScrapeTimeout:  model.Duration(2 * time.Second),
			TargetLimit:    l,
		}))
	}

	var targets int
	loadTargets := func(n int) {
		targets = n
		sp.Sync(tgs[:n])
	}

	validateIsRunning := func() {
		wg.Wait()
		for _, l := range sp.loops {
			require.True(t, l.(*testLoop).runOnce, "loop should be running")
		}
	}

	validateErrorMessage := func(shouldErr bool) {
		for _, l := range sp.loops {
			lerr := l.(*testLoop).getForcedError()
			if shouldErr {
				require.NotNil(t, lerr, "error was expected for %d targets with a limit of %d", targets, limit)
				require.Equal(t, fmt.Sprintf("target_limit exceeded (number of targets: %d, limit: %d)", targets, limit), lerr.Error())
			} else {
				require.Equal(t, nil, lerr)
			}
		}
	}

	reloadWithLimit(0)
	loadTargets(50)
	validateIsRunning()

	// Simulate an initial config with a limit.
	sp.config.TargetLimit = 30
	limit = 30
	loadTargets(50)
	validateIsRunning()
	validateErrorMessage(true)

	reloadWithLimit(50)
	validateIsRunning()
	validateErrorMessage(false)

	reloadWithLimit(40)
	validateIsRunning()
	validateErrorMessage(true)

	loadTargets(30)
	validateIsRunning()
	validateErrorMessage(false)

	loadTargets(40)
	validateIsRunning()
	validateErrorMessage(false)

	loadTargets(41)
	validateIsRunning()
	validateErrorMessage(true)

	reloadWithLimit(0)
	validateIsRunning()
	validateErrorMessage(false)

	reloadWithLimit(51)
	validateIsRunning()
	validateErrorMessage(false)

	tgs = append(tgs,
		&targetgroup.Group{
			Targets: []model.LabelSet{
				{model.AddressLabel: model.LabelValue("127.0.0.1:1090")},
			},
		},
		&targetgroup.Group{
			Targets: []model.LabelSet{
				{model.AddressLabel: model.LabelValue("127.0.0.1:1090")},
			},
		},
	)

	sp.Sync(tgs)
	validateIsRunning()
	validateErrorMessage(false)
}

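// TestScrapePoolAppender checks the appender decoration chain built by
// appender(): bucketLimitAppender wraps limitAppender wraps timeLimitAppender
// wraps the base appender, depending on which limits are non-zero.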
func TestScrapePoolAppender(t *testing.T) {
	cfg := &config.ScrapeConfig{}
	app := &nopAppendable{}
	sp, _ := newScrapePool(cfg, app, 0, nil, &Options{})

	loop := sp.newLoop(scrapeLoopOptions{
		target: &Target{},
	})
	appl, ok := loop.(*scrapeLoop)
	require.True(t, ok, "Expected scrapeLoop but got %T", loop)

	wrapped := appender(appl.appender(context.Background()), 0, 0)

	tl, ok := wrapped.(*timeLimitAppender)
	require.True(t, ok, "Expected timeLimitAppender but got %T", wrapped)

	_, ok = tl.Appender.(nopAppender)
	require.True(t, ok, "Expected base appender but got %T", tl.Appender)

	sampleLimit := 100
	loop = sp.newLoop(scrapeLoopOptions{
		target:      &Target{},
		sampleLimit: sampleLimit,
	})
	appl, ok = loop.(*scrapeLoop)
	require.True(t, ok, "Expected scrapeLoop but got %T", loop)

	wrapped = appender(appl.appender(context.Background()), sampleLimit, 0)

	sl, ok := wrapped.(*limitAppender)
	require.True(t, ok, "Expected limitAppender but got %T", wrapped)

	tl, ok = sl.Appender.(*timeLimitAppender)
	require.True(t, ok, "Expected timeLimitAppender but got %T", sl.Appender)

	_, ok = tl.Appender.(nopAppender)
	require.True(t, ok, "Expected base appender but got %T", tl.Appender)

	wrapped = appender(appl.appender(context.Background()), sampleLimit, 100)

	bl, ok := wrapped.(*bucketLimitAppender)
	require.True(t, ok, "Expected bucketLimitAppender but got %T", wrapped)

	sl, ok = bl.Appender.(*limitAppender)
	require.True(t, ok, "Expected limitAppender but got %T", bl)

	tl, ok = sl.Appender.(*timeLimitAppender)
	require.True(t, ok, "Expected timeLimitAppender but got %T", sl.Appender)

	_, ok = tl.Appender.(nopAppender)
	require.True(t, ok, "Expected base appender but got %T", tl.Appender)
}

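// TestScrapePoolRaces repeatedly reloads a pool that is actively syncing
// targets; it exists mainly to be run under the race detector (go test -race).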
func TestScrapePoolRaces(t *testing.T) {
	interval, _ := model.ParseDuration("1s")
	timeout, _ := model.ParseDuration("500ms")
	newConfig := func() *config.ScrapeConfig {
		return &config.ScrapeConfig{ScrapeInterval: interval, ScrapeTimeout: timeout}
	}
	sp, _ := newScrapePool(newConfig(), &nopAppendable{}, 0, nil, &Options{})
	tgts := []*targetgroup.Group{
		{
			Targets: []model.LabelSet{
				{model.AddressLabel: "127.0.0.1:9090"},
				{model.AddressLabel: "127.0.0.2:9090"},
				{model.AddressLabel: "127.0.0.3:9090"},
				{model.AddressLabel: "127.0.0.4:9090"},
				{model.AddressLabel: "127.0.0.5:9090"},
				{model.AddressLabel: "127.0.0.6:9090"},
				{model.AddressLabel: "127.0.0.7:9090"},
				{model.AddressLabel: "127.0.0.8:9090"},
			},
		},
	}

	sp.Sync(tgts)
	active := sp.ActiveTargets()
	dropped := sp.DroppedTargets()
	expectedActive, expectedDropped := len(tgts[0].Targets), 0

	require.Equal(t, expectedActive, len(active), "Invalid number of active targets")
	require.Equal(t, expectedDropped, len(dropped), "Invalid number of dropped targets")

	for i := 0; i < 20; i++ {
		time.Sleep(10 * time.Millisecond)
		sp.reload(newConfig())
	}
	sp.stop()
}

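// TestScrapePoolScrapeLoopsStarted syncs two target groups that resolve to
// the same address and asserts that only one loop is created and that it is
// actually started.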
func TestScrapePoolScrapeLoopsStarted(t *testing.T) {
	var wg sync.WaitGroup
	newLoop := func(opts scrapeLoopOptions) loop {
		wg.Add(1)
		l := &testLoop{
			startFunc: func(interval, timeout time.Duration, errc chan<- error) {
				wg.Done()
			},
			stopFunc: func() {},
		}
		return l
	}
	sp := &scrapePool{
		appendable:    &nopAppendable{},
		activeTargets: map[uint64]*Target{},
		loops:         map[uint64]loop{},
		newLoop:       newLoop,
		logger:        nil,
		client:        http.DefaultClient,
	}

	tgs := []*targetgroup.Group{
		{
			Targets: []model.LabelSet{
				{model.AddressLabel: model.LabelValue("127.0.0.1:9090")},
			},
		},
		{
			Targets: []model.LabelSet{
				{model.AddressLabel: model.LabelValue("127.0.0.1:9090")},
			},
		},
	}

	require.NoError(t, sp.reload(&config.ScrapeConfig{
		ScrapeInterval: model.Duration(3 * time.Second),
		ScrapeTimeout:  model.Duration(2 * time.Second),
	}))
	sp.Sync(tgs)

	require.Equal(t, 1, len(sp.loops))

	wg.Wait()
	for _, l := range sp.loops {
		require.True(t, l.(*testLoop).runOnce, "loop should be running")
	}
}

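// TestScrapeLoopStopBeforeRun covers the race between stop() and run():
// stopping a loop that has not started yet must block until run has been
// called and has returned.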
func TestScrapeLoopStopBeforeRun(t *testing.T) {
	scraper := &testScraper{}

	sl := newScrapeLoop(context.Background(),
		scraper,
		nil, nil,
		nopMutator,
		nopMutator,
		nil, nil, 0,
		true,
		0, 0,
		nil,
		1,
		0,
		false,
		false,
		false,
		nil,
		false,
	)

	// The scrape pool synchronizes on stopping scrape loops. However, new scrape
	// loops are started asynchronously. Thus it's possible that a loop is stopped
	// again before having started properly.
	// Stopping not-yet-started loops must block until the run method was called and exited.
	// The run method must exit immediately.

	stopDone := make(chan struct{})
	go func() {
		sl.stop()
		close(stopDone)
	}()

	select {
	case <-stopDone:
		t.Fatalf("Stopping terminated before run exited successfully")
	case <-time.After(500 * time.Millisecond):
	}

	// Running the scrape loop must exit before calling the scraper even once.
	scraper.scrapeFunc = func(context.Context, io.Writer) error {
		t.Fatalf("scraper was called for terminated scrape loop")
		return nil
	}

	runDone := make(chan struct{})
	go func() {
		sl.run(nil)
		close(runDone)
	}()

	select {
	case <-runDone:
	case <-time.After(1 * time.Second):
		t.Fatalf("Running terminated scrape loop did not exit")
	}

	select {
	case <-stopDone:
	case <-time.After(1 * time.Second):
		t.Fatalf("Stopping did not terminate after running exited")
	}
}

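// nopMutator is a no-op label mutator for tests that don't exercise
// relabeling.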
func nopMutator(l labels.Labels) labels.Labels { return l }

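// TestScrapeLoopStop stops a loop mid-scrape and asserts that complete
// scrapes were appended, that timestamps are consistent within each scrape,
// and that the final samples are stale markers.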
func TestScrapeLoopStop(t *testing.T) {
	var (
		signal   = make(chan struct{}, 1)
		appender = &collectResultAppender{}
		scraper  = &testScraper{}
		app      = func(ctx context.Context) storage.Appender { return appender }
	)

	sl := newScrapeLoop(context.Background(),
		scraper,
		nil, nil,
		nopMutator,
		nopMutator,
		app,
		nil,
		0,
		true,
		0, 0,
		nil,
		10*time.Millisecond,
		time.Hour,
		false,
		false,
		false,
		nil,
		false,
	)

	// Terminate loop after 2 scrapes.
	numScrapes := 0

	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
		numScrapes++
		if numScrapes == 2 {
			go sl.stop()
			<-sl.ctx.Done()
		}
		w.Write([]byte("metric_a 42\n"))
		return ctx.Err()
	}

	go func() {
		sl.run(nil)
		signal <- struct{}{}
	}()

	select {
	case <-signal:
	case <-time.After(5 * time.Second):
		t.Fatalf("Scrape wasn't stopped.")
	}

	// We expect 1 actual sample for each scrape plus 5 report samples.
	// At least 2 scrapes were made, plus the final stale markers.
	if len(appender.result) < 6*3 || len(appender.result)%6 != 0 {
		t.Fatalf("Expected at least 3 scrapes with 6 samples each, got %d samples", len(appender.result))
	}
	// All samples in a scrape must have the same timestamp.
	var ts int64
	for i, s := range appender.result {
		switch {
		case i%6 == 0:
			ts = s.t
		case s.t != ts:
			t.Fatalf("Unexpected multiple timestamps within single scrape")
		}
	}
	// All samples from the last scrape must be stale markers.
	for _, s := range appender.result[len(appender.result)-5:] {
		if !value.IsStaleNaN(s.v) {
			t.Fatalf("Appended last sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(s.v))
		}
	}
}

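// TestScrapeLoopRun checks run's context handling: cancellation during the
// initial offset must terminate the loop, a scrape timeout must surface
// context.DeadlineExceeded, and parent-context cancellation must stop it.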
func TestScrapeLoopRun(t *testing.T) {
	var (
		signal = make(chan struct{}, 1)
		errc   = make(chan error)

		scraper = &testScraper{}
		app     = func(ctx context.Context) storage.Appender { return &nopAppender{} }
	)

	ctx, cancel := context.WithCancel(context.Background())
	sl := newScrapeLoop(ctx,
		scraper,
		nil, nil,
		nopMutator,
		nopMutator,
		app,
		nil,
		0,
		true,
		0, 0,
		nil,
		time.Second,
		time.Hour,
		false,
		false,
		false,
		nil,
		false,
	)

	// The loop must terminate during the initial offset if the context
	// is canceled.
	scraper.offsetDur = time.Hour

	go func() {
		sl.run(errc)
		signal <- struct{}{}
	}()

	// Wait to make sure we are actually waiting on the offset.
	time.Sleep(1 * time.Second)

	cancel()
	select {
	case <-signal:
	case <-time.After(5 * time.Second):
		t.Fatalf("Cancellation during initial offset failed")
	case err := <-errc:
		t.Fatalf("Unexpected error: %s", err)
	}

	// The provided timeout must cause cancellation of the context passed down to the
	// scraper. The scraper has to respect the context.
	scraper.offsetDur = 0

	block := make(chan struct{})
	scraper.scrapeFunc = func(ctx context.Context, _ io.Writer) error {
		select {
		case <-block:
		case <-ctx.Done():
			return ctx.Err()
		}
		return nil
	}

	ctx, cancel = context.WithCancel(context.Background())
	sl = newScrapeLoop(ctx,
		scraper,
		nil, nil,
		nopMutator,
		nopMutator,
		app,
		nil,
		0,
		true,
		0, 0,
		nil,
		time.Second,
		100*time.Millisecond,
		false,
		false,
		false,
		nil,
		false,
	)

	go func() {
		sl.run(errc)
		signal <- struct{}{}
	}()

	select {
	case err := <-errc:
		if err != context.DeadlineExceeded {
			t.Fatalf("Expected timeout error but got: %s", err)
		}
	case <-time.After(3 * time.Second):
		t.Fatalf("Expected timeout error but got none")
	}

	// We already caught the timeout error and are certainly in the loop.
	// Let the scrapes return immediately to cause no further timeout errors
	// and check whether canceling the parent context terminates the loop.
	close(block)
	cancel()

	select {
	case <-signal:
		// Loop terminated as expected.
	case err := <-errc:
		t.Fatalf("Unexpected error: %s", err)
	case <-time.After(3 * time.Second):
		t.Fatalf("Loop did not terminate on context cancellation")
	}
}

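// TestScrapeLoopForcedErr asserts that a forced error set on the loop is
// reported on the error channel instead of the target being scraped.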
func TestScrapeLoopForcedErr(t *testing.T) {
	var (
		signal = make(chan struct{}, 1)
		errc   = make(chan error)

		scraper = &testScraper{}
		app     = func(ctx context.Context) storage.Appender { return &nopAppender{} }
	)

	ctx, cancel := context.WithCancel(context.Background())
	sl := newScrapeLoop(ctx,
		scraper,
		nil, nil,
		nopMutator,
		nopMutator,
		app,
		nil,
		0,
		true,
		0, 0,
		nil,
		time.Second,
		time.Hour,
		false,
		false,
		false,
		nil,
		false,
	)

	forcedErr := fmt.Errorf("forced err")
	sl.setForcedError(forcedErr)

	scraper.scrapeFunc = func(context.Context, io.Writer) error {
		t.Fatalf("should not be scraped")
		return nil
	}

	go func() {
		sl.run(errc)
		signal <- struct{}{}
	}()

	select {
	case err := <-errc:
		if err != forcedErr {
			t.Fatalf("Expected forced error but got: %s", err)
		}
	case <-time.After(3 * time.Second):
		t.Fatalf("Expected forced error but got none")
	}
	cancel()

	select {
	case <-signal:
	case <-time.After(5 * time.Second):
		t.Fatalf("Scrape not stopped")
	}
}

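// TestScrapeLoopMetadata appends an OpenMetrics exposition and verifies that
// the TYPE, HELP, and UNIT metadata end up in the scrape cache.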
func TestScrapeLoopMetadata(t *testing.T) {
	var (
		signal  = make(chan struct{})
		scraper = &testScraper{}
		cache   = newScrapeCache()
	)
	defer close(signal)

	ctx, cancel := context.WithCancel(context.Background())
	sl := newScrapeLoop(ctx,
		scraper,
		nil, nil,
		nopMutator,
		nopMutator,
		func(ctx context.Context) storage.Appender { return nopAppender{} },
		cache,
		0,
		true,
		0, 0,
		nil,
		0,
		0,
		false,
		false,
		false,
		nil,
		false,
	)
	defer cancel()

	slApp := sl.appender(ctx)
	total, _, _, err := sl.append(slApp, []byte(`# TYPE test_metric counter
# HELP test_metric some help text
# UNIT test_metric metric
test_metric 1
# TYPE test_metric_no_help gauge
# HELP test_metric_no_type other help text
# EOF`), "application/openmetrics-text", time.Now())
	require.NoError(t, err)
	require.NoError(t, slApp.Commit())
	require.Equal(t, 1, total)

	md, ok := cache.GetMetadata("test_metric")
	require.True(t, ok, "expected metadata to be present")
	require.Equal(t, textparse.MetricTypeCounter, md.Type, "unexpected metric type")
	require.Equal(t, "some help text", md.Help)
	require.Equal(t, "metric", md.Unit)

	md, ok = cache.GetMetadata("test_metric_no_help")
	require.True(t, ok, "expected metadata to be present")
	require.Equal(t, textparse.MetricTypeGauge, md.Type, "unexpected metric type")
	require.Equal(t, "", md.Help)
	require.Equal(t, "", md.Unit)

	md, ok = cache.GetMetadata("test_metric_no_type")
	require.True(t, ok, "expected metadata to be present")
	require.Equal(t, textparse.MetricTypeUnknown, md.Type, "unexpected metric type")
	require.Equal(t, "other help text", md.Help)
	require.Equal(t, "", md.Unit)
}

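// simpleTestScrapeLoop constructs a scrape loop backed by a real test
// storage, with cleanup registered on the test, for tests and benchmarks
// that only need append semantics.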
func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) {
	// Need a full storage for correct Add/AddFast semantics.
	s := teststorage.New(t)
	t.Cleanup(func() { s.Close() })

	ctx, cancel := context.WithCancel(context.Background())
	sl := newScrapeLoop(ctx,
		&testScraper{},
		nil, nil,
		nopMutator,
		nopMutator,
		s.Appender,
		nil,
		0,
		true,
		0, 0,
		nil,
		0,
		0,
		false,
		false,
		false,
		nil,
		false,
	)
	t.Cleanup(func() { cancel() })

	return ctx, sl
}

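// TestScrapeLoopSeriesAdded checks that seriesAdded counts a series on its
// first appearance only: appending the same sample twice yields 1, then 0.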
func TestScrapeLoopSeriesAdded(t *testing.T) {
	ctx, sl := simpleTestScrapeLoop(t)

	slApp := sl.appender(ctx)
	total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "", time.Time{})
	require.NoError(t, err)
	require.NoError(t, slApp.Commit())
	require.Equal(t, 1, total)
	require.Equal(t, 1, added)
	require.Equal(t, 1, seriesAdded)

	slApp = sl.appender(ctx)
	total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\n"), "", time.Time{})
	require.NoError(t, slApp.Commit())
	require.NoError(t, err)
	require.Equal(t, 1, total)
	require.Equal(t, 1, added)
	require.Equal(t, 0, seriesAdded)
}

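// TestScrapeLoopFailWithInvalidLabelsAfterRelabel maps a label whose suffix
// starts with digits onto an invalid label name and expects the append to be
// rejected and rolled back.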
func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) {
	s := teststorage.New(t)
	defer s.Close()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	target := &Target{
		labels: labels.FromStrings("pod_label_invalid_012", "test"),
	}
	relabelConfig := []*relabel.Config{{
		Action:      relabel.LabelMap,
		Regex:       relabel.MustNewRegexp("pod_label_invalid_(.+)"),
		Separator:   ";",
		Replacement: "$1",
	}}
	sl := newScrapeLoop(ctx,
		&testScraper{},
		nil, nil,
		func(l labels.Labels) labels.Labels {
			return mutateSampleLabels(l, target, true, relabelConfig)
		},
		nopMutator,
		s.Appender,
		nil,
		0,
		true,
		0, 0,
		nil,
		0,
		0,
		false,
		false,
		false,
		nil,
		false,
	)

	slApp := sl.appender(ctx)
	total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\n"), "", time.Time{})
	require.ErrorContains(t, err, "invalid metric name or label names")
	require.NoError(t, slApp.Rollback())
	require.Equal(t, 1, total)
	require.Equal(t, 0, added)
	require.Equal(t, 0, seriesAdded)
}

func makeTestMetrics(n int) []byte {
	// Construct a metrics string to parse.
	sb := bytes.Buffer{}
	for i := 0; i < n; i++ {
		fmt.Fprintf(&sb, "# TYPE metric_a gauge\n")
		fmt.Fprintf(&sb, "# HELP metric_a help text\n")
		fmt.Fprintf(&sb, "metric_a{foo=\"%d\",bar=\"%d\"} 1\n", i, i*100)
	}
	return sb.Bytes()
}

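// The two benchmarks below measure append throughput for the same payload in
// the plain text exposition format and in OpenMetrics, respectively.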
func BenchmarkScrapeLoopAppend(b *testing.B) {
	ctx, sl := simpleTestScrapeLoop(b)

	slApp := sl.appender(ctx)
	metrics := makeTestMetrics(100)
	ts := time.Time{}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		ts = ts.Add(time.Second)
		_, _, _, _ = sl.append(slApp, metrics, "", ts)
	}
}

func BenchmarkScrapeLoopAppendOM(b *testing.B) {
	ctx, sl := simpleTestScrapeLoop(b)

	slApp := sl.appender(ctx)
	metrics := makeTestMetrics(100)
	ts := time.Time{}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		ts = ts.Add(time.Second)
		_, _, _, _ = sl.append(slApp, metrics, "application/openmetrics-text", ts)
	}
}

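// TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape lets one scrape succeed
// and subsequent ones fail, and expects a stale marker for the series right
// after the first failure.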
func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
	appender := &collectResultAppender{}
	var (
		signal  = make(chan struct{}, 1)
		scraper = &testScraper{}
		app     = func(ctx context.Context) storage.Appender { return appender }
	)

	ctx, cancel := context.WithCancel(context.Background())
	sl := newScrapeLoop(ctx,
		scraper,
		nil, nil,
		nopMutator,
		nopMutator,
		app,
		nil,
		0,
		true,
		0, 0,
		nil,
		10*time.Millisecond,
		time.Hour,
		false,
		false,
		false,
		nil,
		false,
	)
	// Succeed once, several failures, then stop.
	numScrapes := 0

	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
		numScrapes++

		switch numScrapes {
		case 1:
			w.Write([]byte("metric_a 42\n"))
			return nil
		case 5:
			cancel()
		}
		return errors.New("scrape failed")
	}

	go func() {
		sl.run(nil)
		signal <- struct{}{}
	}()

	select {
	case <-signal:
	case <-time.After(5 * time.Second):
		t.Fatalf("Scrape wasn't stopped.")
	}

	// 1 successfully scraped sample, 1 stale marker after the first fail, and 5
	// report samples for each scrape, successful or not.
	require.Equal(t, 27, len(appender.result), "Appended samples not as expected:\n%s", appender)
	require.Equal(t, 42.0, appender.result[0].v, "Appended first sample not as expected")
	require.True(t, value.IsStaleNaN(appender.result[6].v),
		"Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.result[6].v))
}

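// TestScrapeLoopRunCreatesStaleMarkersOnParseFailure covers the same scenario
// as above, but the failures come from unparsable payloads rather than scrape
// errors.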
func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
	appender := &collectResultAppender{}
	var (
		signal     = make(chan struct{}, 1)
		scraper    = &testScraper{}
		app        = func(ctx context.Context) storage.Appender { return appender }
		numScrapes = 0
	)

	ctx, cancel := context.WithCancel(context.Background())
	sl := newScrapeLoop(ctx,
		scraper,
		nil, nil,
		nopMutator,
		nopMutator,
		app,
		nil,
		0,
		true,
		0, 0,
		nil,
		10*time.Millisecond,
		time.Hour,
		false,
		false,
		false,
		nil,
		false,
	)

	// Succeed once, several failures, then stop.
	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
		numScrapes++
switch numScrapes {
|
|
|
|
case 1:
|
2017-05-03 08:51:45 -07:00
|
|
|
w.Write([]byte("metric_a 42\n"))
|
|
|
|
return nil
|
style: Replace `else if` cascades with `switch`
Wiser coders than myself have come to the conclusion that a `switch`
statement is almost always superior to a statement that includes any
`else if`.
The exceptions that I have found in our codebase are just these two:
* The `if else` is followed by an additional statement before the next
condition (separated by a `;`).
* The whole thing is within a `for` loop and `break` statements are
used. In this case, using `switch` would require tagging the `for`
loop, which probably tips the balance.
Why are `switch` statements more readable?
For one, fewer curly braces. But more importantly, the conditions all
have the same alignment, so the whole thing follows the natural flow
of going down a list of conditions. With `else if`, in contrast, all
conditions but the first are "hidden" behind `} else if `, harder to
spot and (for no good reason) presented differently from the first
condition.
I'm sure the aforemention wise coders can list even more reasons.
In any case, I like it so much that I have found myself recommending
it in code reviews. I would like to make it a habit in our code base,
without making it a hard requirement that we would test on the CI. But
for that, there has to be a role model, so this commit eliminates all
`if else` occurrences, unless it is autogenerated code or fits one of
the exceptions above.
Signed-off-by: beorn7 <beorn@grafana.com>
2023-04-12 07:14:31 -07:00
|
|
|
case 2:
|
2017-05-03 08:51:45 -07:00
|
|
|
w.Write([]byte("7&-\n"))
|
|
|
|
return nil
|
style: Replace `else if` cascades with `switch`
Wiser coders than myself have come to the conclusion that a `switch`
statement is almost always superior to a statement that includes any
`else if`.
The exceptions that I have found in our codebase are just these two:
* The `if else` is followed by an additional statement before the next
condition (separated by a `;`).
* The whole thing is within a `for` loop and `break` statements are
used. In this case, using `switch` would require tagging the `for`
loop, which probably tips the balance.
Why are `switch` statements more readable?
For one, fewer curly braces. But more importantly, the conditions all
have the same alignment, so the whole thing follows the natural flow
of going down a list of conditions. With `else if`, in contrast, all
conditions but the first are "hidden" behind `} else if `, harder to
spot and (for no good reason) presented differently from the first
condition.
I'm sure the aforemention wise coders can list even more reasons.
In any case, I like it so much that I have found myself recommending
it in code reviews. I would like to make it a habit in our code base,
without making it a hard requirement that we would test on the CI. But
for that, there has to be a role model, so this commit eliminates all
`if else` occurrences, unless it is autogenerated code or fits one of
the exceptions above.
Signed-off-by: beorn7 <beorn@grafana.com>
2023-04-12 07:14:31 -07:00
|
|
|
case 3:
|
2017-05-03 08:51:45 -07:00
|
|
|
cancel()
|
|
|
|
}
|
2019-03-25 16:01:12 -07:00
|
|
|
return errors.New("scrape failed")
|
2017-05-03 08:51:45 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
go func() {
|
2021-08-31 08:37:32 -07:00
|
|
|
sl.run(nil)
|
2017-05-03 08:51:45 -07:00
|
|
|
signal <- struct{}{}
|
|
|
|
}()
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-signal:
|
|
|
|
case <-time.After(5 * time.Second):
|
|
|
|
t.Fatalf("Scrape wasn't stopped.")
|
|
|
|
}
|
|
|
|
|
2019-05-08 14:24:00 -07:00
|
|
|
// 1 successfully scraped sample, 1 stale marker after first fail, 5 report samples for
|
2017-09-08 05:34:45 -07:00
|
|
|
// each scrape successful or not.
|
2022-07-01 05:28:56 -07:00
|
|
|
require.Equal(t, 17, len(appender.result), "Appended samples not as expected:\n%s", appender)
|
2020-10-29 02:43:23 -07:00
|
|
|
require.Equal(t, 42.0, appender.result[0].v, "Appended first sample not as expected")
|
|
|
|
require.True(t, value.IsStaleNaN(appender.result[6].v),
|
2019-11-04 15:43:42 -08:00
|
|
|
"Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(appender.result[6].v))
|
2019-03-28 10:07:14 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
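// TestScrapeLoopCache checks the scrape cache's retention behavior: series from
// the last successful scrape stay cached across a failed scrape, while series
// that are no longer exposed are eventually evicted.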
func TestScrapeLoopCache(t *testing.T) {
	s := teststorage.New(t)
	defer s.Close()

	appender := &collectResultAppender{}
	var (
		signal  = make(chan struct{}, 1)
		scraper = &testScraper{}
		app     = func(ctx context.Context) storage.Appender { appender.next = s.Appender(ctx); return appender }
	)

	ctx, cancel := context.WithCancel(context.Background())
	sl := newScrapeLoop(ctx,
		scraper,
		nil, nil,
		nopMutator,
		nopMutator,
		app,
		nil,
		0,
		true,
		0, 0,
		nil,
		10*time.Millisecond,
		time.Hour,
		false,
		false,
		false,
		nil,
		false,
	)

	numScrapes := 0

	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
		switch numScrapes {
		case 1, 2:
			if _, ok := sl.cache.series["metric_a"]; !ok {
				t.Errorf("metric_a missing from cache after scrape %d", numScrapes)
			}
			if _, ok := sl.cache.series["metric_b"]; !ok {
				t.Errorf("metric_b missing from cache after scrape %d", numScrapes)
			}
		case 3:
			if _, ok := sl.cache.series["metric_a"]; !ok {
				t.Errorf("metric_a missing from cache after scrape %d", numScrapes)
			}
			if _, ok := sl.cache.series["metric_b"]; ok {
				t.Errorf("metric_b present in cache after scrape %d", numScrapes)
			}
		}

		numScrapes++
		switch numScrapes {
		case 1:
			w.Write([]byte("metric_a 42\nmetric_b 43\n"))
			return nil
		case 3:
			w.Write([]byte("metric_a 44\n"))
			return nil
		case 4:
			cancel()
		}
		return errors.New("scrape failed")
	}

	go func() {
		sl.run(nil)
		signal <- struct{}{}
	}()

	select {
	case <-signal:
	case <-time.After(5 * time.Second):
		t.Fatalf("Scrape wasn't stopped.")
	}

	// 2 samples from the first scrape, 2 stale markers after its failure, 1 sample
	// and 1 stale marker from the third and fourth scrapes, plus 5 report samples
	// for each of the 4 scrapes: 26 in total.
	require.Equal(t, 26, len(appender.result), "Appended samples not as expected:\n%s", appender)
}

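// TestScrapeLoopCacheMemoryExhaustionProtection checks that the scrape cache does
// not grow without bound when every scrape exposes a fresh set of series.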
func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
	s := teststorage.New(t)
	defer s.Close()

	sapp := s.Appender(context.Background())

	appender := &collectResultAppender{next: sapp}
	var (
		signal  = make(chan struct{}, 1)
		scraper = &testScraper{}
		app     = func(ctx context.Context) storage.Appender { return appender }
	)

	ctx, cancel := context.WithCancel(context.Background())
	sl := newScrapeLoop(ctx,
		scraper,
		nil, nil,
		nopMutator,
		nopMutator,
		app,
		nil,
		0,
		true,
		0, 0,
		nil,
		10*time.Millisecond,
		time.Hour,
		false,
		false,
		false,
		nil,
		false,
	)

	numScrapes := 0

	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
		numScrapes++
		if numScrapes < 5 {
			s := ""
			for i := 0; i < 500; i++ {
				s = fmt.Sprintf("%smetric_%d_%d 42\n", s, i, numScrapes)
			}
			// The trailing "&" makes each scrape end in a parse error after
			// the 500 samples have been ingested.
			w.Write([]byte(s + "&"))
		} else {
			cancel()
		}
		return nil
	}

	go func() {
		sl.run(nil)
		signal <- struct{}{}
	}()

	select {
	case <-signal:
	case <-time.After(5 * time.Second):
		t.Fatalf("Scrape wasn't stopped.")
	}

	if len(sl.cache.series) > 2000 {
		t.Fatalf("More than 2000 series cached. Got: %d", len(sl.cache.series))
	}
}

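// TestScrapeLoopAppend covers label-conflict handling during append: prefixing on
// collisions, honor_labels, dropping empty labels, and normalizing NaN values.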
func TestScrapeLoopAppend(t *testing.T) {
	tests := []struct {
		title           string
		honorLabels     bool
		scrapeLabels    string
		discoveryLabels []string
		expLset         labels.Labels
		expValue        float64
	}{
		{
			// When "honor_labels" is not set
			// a label name collision is handled by adding a prefix.
			title:           "Label name collision",
			honorLabels:     false,
			scrapeLabels:    `metric{n="1"} 0`,
			discoveryLabels: []string{"n", "2"},
			expLset:         labels.FromStrings("__name__", "metric", "exported_n", "1", "n", "2"),
			expValue:        0,
		}, {
			// When "honor_labels" is not set
			// exported labels from discovery don't get overwritten.
			title:           "Label name collision",
			honorLabels:     false,
			scrapeLabels:    `metric 0`,
			discoveryLabels: []string{"n", "2", "exported_n", "2"},
			expLset:         labels.FromStrings("__name__", "metric", "n", "2", "exported_n", "2"),
			expValue:        0,
		}, {
			// Labels with no value need to be removed as these should not be ingested.
			title:           "Delete Empty labels",
			honorLabels:     false,
			scrapeLabels:    `metric{n=""} 0`,
			discoveryLabels: nil,
			expLset:         labels.FromStrings("__name__", "metric"),
			expValue:        0,
		}, {
			// Honor Labels should ignore labels with the same name.
			title:           "Honor Labels",
			honorLabels:     true,
			scrapeLabels:    `metric{n1="1" n2="2"} 0`,
			discoveryLabels: []string{"n1", "0"},
			expLset:         labels.FromStrings("__name__", "metric", "n1", "1", "n2", "2"),
			expValue:        0,
		}, {
			title:           "Stale - NaN",
			honorLabels:     false,
			scrapeLabels:    `metric NaN`,
			discoveryLabels: nil,
			expLset:         labels.FromStrings("__name__", "metric"),
			expValue:        float64(value.NormalNaN),
		},
	}

	for _, test := range tests {
		app := &collectResultAppender{}

		discoveryLabels := &Target{
			labels: labels.FromStrings(test.discoveryLabels...),
		}

		sl := newScrapeLoop(context.Background(),
			nil, nil, nil,
			func(l labels.Labels) labels.Labels {
				return mutateSampleLabels(l, discoveryLabels, test.honorLabels, nil)
			},
			func(l labels.Labels) labels.Labels {
				return mutateReportSampleLabels(l, discoveryLabels)
			},
			func(ctx context.Context) storage.Appender { return app },
			nil,
			0,
			true,
			0, 0,
			nil,
			0,
			0,
			false,
			false,
			false,
			nil,
			false,
		)

		now := time.Now()

		slApp := sl.appender(context.Background())
		_, _, _, err := sl.append(slApp, []byte(test.scrapeLabels), "", now)
		require.NoError(t, err)
		require.NoError(t, slApp.Commit())

		expected := []sample{
			{
				metric: test.expLset,
				t:      timestamp.FromTime(now),
				v:      test.expValue,
			},
		}

		// When the expected value is NaN
		// DeepEqual will report NaNs as being different,
		// so replace it with the expected one.
		if test.expValue == float64(value.NormalNaN) {
			app.result[0].v = expected[0].v
		}

		t.Logf("Test: %s", test.title)
		require.Equal(t, expected, app.result)
	}
}

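// TestScrapeLoopAppendForConflictingPrefixedLabels checks the recursive "exported_"
// prefixing applied when target labels collide with exposed labels that may already
// carry one or more "exported_" prefixes.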
func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
	testcases := map[string]struct {
		targetLabels  []string
		exposedLabels string
		expected      []string
	}{
		"One target label collides with existing label": {
			targetLabels:  []string{"foo", "2"},
			exposedLabels: `metric{foo="1"} 0`,
			expected:      []string{"__name__", "metric", "exported_foo", "1", "foo", "2"},
		},

		"One target label collides with existing label, plus target label already with prefix 'exported'": {
			targetLabels:  []string{"foo", "2", "exported_foo", "3"},
			exposedLabels: `metric{foo="1"} 0`,
			expected:      []string{"__name__", "metric", "exported_exported_foo", "1", "exported_foo", "3", "foo", "2"},
		},

		"One target label collides with existing label, plus existing label already with prefix 'exported'": {
			targetLabels:  []string{"foo", "3"},
			exposedLabels: `metric{foo="1" exported_foo="2"} 0`,
			expected:      []string{"__name__", "metric", "exported_exported_foo", "1", "exported_foo", "2", "foo", "3"},
		},

		"One target label collides with existing label, both already with prefix 'exported'": {
			targetLabels:  []string{"exported_foo", "2"},
			exposedLabels: `metric{exported_foo="1"} 0`,
			expected:      []string{"__name__", "metric", "exported_exported_foo", "1", "exported_foo", "2"},
		},

		"Two target labels collide with existing labels, both with and without prefix 'exported'": {
			targetLabels:  []string{"foo", "3", "exported_foo", "4"},
			exposedLabels: `metric{foo="1" exported_foo="2"} 0`,
			expected: []string{
				"__name__", "metric", "exported_exported_foo", "1", "exported_exported_exported_foo",
				"2", "exported_foo", "4", "foo", "3",
			},
		},

		"Extreme example": {
			targetLabels:  []string{"foo", "0", "exported_exported_foo", "1", "exported_exported_exported_foo", "2"},
			exposedLabels: `metric{foo="3" exported_foo="4" exported_exported_exported_foo="5"} 0`,
			expected: []string{
				"__name__", "metric",
				"exported_exported_exported_exported_exported_foo", "5",
				"exported_exported_exported_exported_foo", "3",
				"exported_exported_exported_foo", "2",
				"exported_exported_foo", "1",
				"exported_foo", "4",
				"foo", "0",
			},
		},
	}

	for name, tc := range testcases {
		t.Run(name, func(t *testing.T) {
			app := &collectResultAppender{}
			sl := newScrapeLoop(context.Background(), nil, nil, nil,
				func(l labels.Labels) labels.Labels {
					return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil)
				},
				nil,
				func(ctx context.Context) storage.Appender { return app }, nil, 0, true, 0, 0, nil, 0, 0, false, false, false, nil, false,
			)
			slApp := sl.appender(context.Background())
			_, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC))
			require.NoError(t, err)

			require.NoError(t, slApp.Commit())

			require.Equal(t, []sample{
				{
					metric: labels.FromStrings(tc.expected...),
					t:      timestamp.FromTime(time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC)),
					v:      0,
				},
			}, app.result)
		})
	}
}

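// TestScrapeLoopAppendCacheEntryButErrNotFound ensures that a cached series ref the
// storage no longer knows about falls back to a regular append instead of failing.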
func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
	// collectResultAppender's AddFast always returns ErrNotFound if we don't give it a next.
	app := &collectResultAppender{}

	sl := newScrapeLoop(context.Background(),
		nil, nil, nil,
		nopMutator,
		nopMutator,
		func(ctx context.Context) storage.Appender { return app },
		nil,
		0,
		true,
		0, 0,
		nil,
		0,
		0,
		false,
		false,
		false,
		nil,
		false,
	)

	fakeRef := storage.SeriesRef(1)
	expValue := float64(1)
	metric := []byte(`metric{n="1"} 1`)
	p, warning := textparse.New(metric, "", false)
	require.NoError(t, warning)

	var lset labels.Labels
	p.Next()
	p.Metric(&lset)
	hash := lset.Hash()

	// Create a fake entry in the cache.
	sl.cache.addRef(metric, fakeRef, lset, hash)
	now := time.Now()

	slApp := sl.appender(context.Background())
	_, _, _, err := sl.append(slApp, metric, "", now)
	require.NoError(t, err)
	require.NoError(t, slApp.Commit())

	expected := []sample{
		{
			metric: lset,
			t:      timestamp.FromTime(now),
			v:      expValue,
		},
	}

	require.Equal(t, expected, app.result)
}

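// TestScrapeLoopAppendSampleLimit verifies that appends beyond the configured sample
// limit are rolled back, counted once per scrape in targetScrapeSampleLimit, and
// that the returned total/added/seriesAdded counts remain correct.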
func TestScrapeLoopAppendSampleLimit(t *testing.T) {
	resApp := &collectResultAppender{}
	app := &limitAppender{Appender: resApp, limit: 1}

	sl := newScrapeLoop(context.Background(),
		nil, nil, nil,
		func(l labels.Labels) labels.Labels {
			if l.Has("deleteme") {
				return labels.EmptyLabels()
			}
			return l
		},
		nopMutator,
		func(ctx context.Context) storage.Appender { return app },
		nil,
		0,
		true,
		app.limit, 0,
		nil,
		0,
		0,
		false,
		false,
		false,
		nil,
		false,
	)

	// Get the value of the Counter before performing the append.
	beforeMetric := dto.Metric{}
	err := targetScrapeSampleLimit.Write(&beforeMetric)
	require.NoError(t, err)

	beforeMetricValue := beforeMetric.GetCounter().GetValue()

	now := time.Now()
	slApp := sl.appender(context.Background())
	total, added, seriesAdded, err := sl.append(app, []byte("metric_a 1\nmetric_b 1\nmetric_c 1\n"), "", now)
	if err != errSampleLimit {
		t.Fatalf("Did not see expected sample limit error: %s", err)
	}
	require.NoError(t, slApp.Rollback())
	require.Equal(t, 3, total)
	require.Equal(t, 3, added)
	require.Equal(t, 1, seriesAdded)

	// Check that the Counter has been incremented a single time for the scrape,
	// not multiple times for each sample.
	metric := dto.Metric{}
	err = targetScrapeSampleLimit.Write(&metric)
	require.NoError(t, err)

	value := metric.GetCounter().GetValue()
	change := value - beforeMetricValue
	require.Equal(t, 1.0, change, "Unexpected change of sample limit metric: %f", change)

	// And verify that we got the samples that fit under the limit.
	want := []sample{
		{
			metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
			t:      timestamp.FromTime(now),
			v:      1,
		},
	}
	require.Equal(t, want, resApp.rolledbackResult, "Appended samples not as expected:\n%s", resApp)

	now = time.Now()
	slApp = sl.appender(context.Background())
	total, added, seriesAdded, err = sl.append(slApp, []byte("metric_a 1\nmetric_b 1\nmetric_c{deleteme=\"yes\"} 1\nmetric_d 1\nmetric_e 1\nmetric_f 1\nmetric_g 1\nmetric_h{deleteme=\"yes\"} 1\nmetric_i{deleteme=\"yes\"} 1\n"), "", now)
	if err != errSampleLimit {
		t.Fatalf("Did not see expected sample limit error: %s", err)
	}
	require.NoError(t, slApp.Rollback())
	require.Equal(t, 9, total)
	require.Equal(t, 6, added)
	require.Equal(t, 0, seriesAdded)
}

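// TestScrapeLoop_HistogramBucketLimit verifies that native histograms exceeding the
// configured bucket limit fail the append with errBucketLimit and increment
// targetScrapeNativeHistogramBucketLimit, while histograms under the limit pass.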
func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
	resApp := &collectResultAppender{}
	app := &bucketLimitAppender{Appender: resApp, limit: 2}

	sl := newScrapeLoop(context.Background(),
		nil, nil, nil,
		func(l labels.Labels) labels.Labels {
			if l.Has("deleteme") {
				return labels.EmptyLabels()
			}
			return l
		},
		nopMutator,
		func(ctx context.Context) storage.Appender { return app },
		nil,
		0,
		true,
		app.limit, 0,
		nil,
		0,
		0,
		false,
		false,
		false,
		nil,
		false,
	)

	metric := dto.Metric{}
	err := targetScrapeNativeHistogramBucketLimit.Write(&metric)
	require.NoError(t, err)
	beforeMetricValue := metric.GetCounter().GetValue()

	nativeHistogram := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace:                      "testing",
			Name:                           "example_native_histogram",
			Help:                           "This is used for testing",
			ConstLabels:                    map[string]string{"some": "value"},
			NativeHistogramBucketFactor:    1.1, // 10% increase from bucket to bucket
			NativeHistogramMaxBucketNumber: 100, // intentionally higher than the limit we'll use in the scraper
		},
		[]string{"size"},
	)
	registry := prometheus.NewRegistry()
	require.NoError(t, registry.Register(nativeHistogram))
	nativeHistogram.WithLabelValues("S").Observe(1.0)
	nativeHistogram.WithLabelValues("M").Observe(1.0)
	nativeHistogram.WithLabelValues("L").Observe(1.0)
	nativeHistogram.WithLabelValues("M").Observe(10.0)
	nativeHistogram.WithLabelValues("L").Observe(10.0) // in different bucket since > 1*1.1

	gathered, err := registry.Gather()
	require.NoError(t, err)
	require.NotEmpty(t, gathered)

	histogramMetricFamily := gathered[0]
	msg, err := MetricFamilyToProtobuf(histogramMetricFamily)
	require.NoError(t, err)

	now := time.Now()
	total, added, seriesAdded, err := sl.append(app, msg, "application/vnd.google.protobuf", now)
	require.NoError(t, err)
	require.Equal(t, 3, total)
	require.Equal(t, 3, added)
	require.Equal(t, 3, seriesAdded)

	err = targetScrapeNativeHistogramBucketLimit.Write(&metric)
	require.NoError(t, err)
	metricValue := metric.GetCounter().GetValue()
	require.Equal(t, beforeMetricValue, metricValue)
	beforeMetricValue = metricValue

	nativeHistogram.WithLabelValues("L").Observe(100.0) // in different bucket since > 10*1.1

	gathered, err = registry.Gather()
	require.NoError(t, err)
	require.NotEmpty(t, gathered)

	histogramMetricFamily = gathered[0]
	msg, err = MetricFamilyToProtobuf(histogramMetricFamily)
	require.NoError(t, err)

	now = time.Now()
	total, added, seriesAdded, err = sl.append(app, msg, "application/vnd.google.protobuf", now)
	if err != errBucketLimit {
		t.Fatalf("Did not see expected histogram bucket limit error: %s", err)
	}
	require.NoError(t, app.Rollback())
	require.Equal(t, 3, total)
	require.Equal(t, 3, added)
	require.Equal(t, 0, seriesAdded)

	err = targetScrapeNativeHistogramBucketLimit.Write(&metric)
	require.NoError(t, err)
	metricValue = metric.GetCounter().GetValue()
	require.Equal(t, beforeMetricValue+1, metricValue)
}

func TestScrapeLoop_ChangingMetricString(t *testing.T) {
	// This is a regression test for the scrape loop cache not properly maintaining
	// IDs when the string representation of a metric changes across a scrape. Thus
	// we use a real storage appender here.
	s := teststorage.New(t)
	defer s.Close()

	capp := &collectResultAppender{}

	sl := newScrapeLoop(context.Background(),
		nil, nil, nil,
		nopMutator,
		nopMutator,
		func(ctx context.Context) storage.Appender { capp.next = s.Appender(ctx); return capp },
		nil,
		0,
		true,
		0, 0,
		nil,
		0,
		0,
		false,
		false,
		false,
		nil,
		false,
	)

	now := time.Now()
	slApp := sl.appender(context.Background())
	_, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1`), "", now)
	require.NoError(t, err)
	require.NoError(t, slApp.Commit())

	slApp = sl.appender(context.Background())
	_, _, _, err = sl.append(slApp, []byte(`metric_a{b="1",a="1"} 2`), "", now.Add(time.Minute))
	require.NoError(t, err)
	require.NoError(t, slApp.Commit())

	want := []sample{
		{
			metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
			t:      timestamp.FromTime(now),
			v:      1,
		},
		{
			metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
			t:      timestamp.FromTime(now.Add(time.Minute)),
			v:      2,
		},
	}
	require.Equal(t, want, capp.result, "Appended samples not as expected:\n%s", capp)
}

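// TestScrapeLoopAppendStaleness checks that a series missing from the next scrape
// receives a stale-NaN sample at the later scrape's timestamp.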
func TestScrapeLoopAppendStaleness(t *testing.T) {
	app := &collectResultAppender{}

	sl := newScrapeLoop(context.Background(),
		nil, nil, nil,
		nopMutator,
		nopMutator,
		func(ctx context.Context) storage.Appender { return app },
		nil,
		0,
		true,
		0, 0,
		nil,
		0,
		0,
		false,
		false,
		false,
		nil,
		false,
	)

	now := time.Now()
	slApp := sl.appender(context.Background())
	_, _, _, err := sl.append(slApp, []byte("metric_a 1\n"), "", now)
	require.NoError(t, err)
	require.NoError(t, slApp.Commit())

	slApp = sl.appender(context.Background())
	_, _, _, err = sl.append(slApp, []byte(""), "", now.Add(time.Second))
	require.NoError(t, err)
	require.NoError(t, slApp.Commit())

	ingestedNaN := math.Float64bits(app.result[1].v)
	require.Equal(t, value.StaleNaN, ingestedNaN, "Appended stale sample wasn't as expected")

	// DeepEqual will report NaNs as being different, so replace with a different value.
	app.result[1].v = 42
	want := []sample{
		{
			metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
			t:      timestamp.FromTime(now),
			v:      1,
		},
		{
			metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
			t:      timestamp.FromTime(now.Add(time.Second)),
			v:      42,
		},
	}
	require.Equal(t, want, app.result, "Appended samples not as expected:\n%s", app)
}

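// TestScrapeLoopAppendNoStalenessIfTimestamp checks that samples carrying an
// explicit timestamp do not get stale markers when the series disappears.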
func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
	app := &collectResultAppender{}
	sl := newScrapeLoop(context.Background(),
		nil, nil, nil,
		nopMutator,
		nopMutator,
		func(ctx context.Context) storage.Appender { return app },
		nil,
		0,
		true,
		0, 0,
		nil,
		0,
		0,
		false,
		false,
		false,
		nil,
		false,
	)

	now := time.Now()
	slApp := sl.appender(context.Background())
	_, _, _, err := sl.append(slApp, []byte("metric_a 1 1000\n"), "", now)
	require.NoError(t, err)
	require.NoError(t, slApp.Commit())

	slApp = sl.appender(context.Background())
	_, _, _, err = sl.append(slApp, []byte(""), "", now.Add(time.Second))
	require.NoError(t, err)
	require.NoError(t, slApp.Commit())

	want := []sample{
		{
			metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
			t:      1000,
			v:      1,
		},
	}
	require.Equal(t, want, app.result, "Appended samples not as expected:\n%s", app)
}

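// TestScrapeLoopAppendExemplar covers exemplar ingestion from OpenMetrics
// exposition, with and without explicit exemplar timestamps.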
func TestScrapeLoopAppendExemplar(t *testing.T) {
	tests := []struct {
		title           string
		scrapeText      string
		discoveryLabels []string
		samples         []sample
		exemplars       []exemplar.Exemplar
	}{
		{
			title:           "Metric without exemplars",
			scrapeText:      "metric_total{n=\"1\"} 0\n# EOF",
			discoveryLabels: []string{"n", "2"},
			samples: []sample{{
				metric: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"),
				v:      0,
			}},
		},
		{
			title:           "Metric with exemplars",
			scrapeText:      "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0\n# EOF",
			discoveryLabels: []string{"n", "2"},
			samples: []sample{{
				metric: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"),
				v:      0,
			}},
			exemplars: []exemplar.Exemplar{
				{Labels: labels.FromStrings("a", "abc"), Value: 1},
			},
		},
		{
			title:           "Metric with exemplars and TS",
			scrapeText:      "metric_total{n=\"1\"} 0 # {a=\"abc\"} 1.0 10000\n# EOF",
			discoveryLabels: []string{"n", "2"},
			samples: []sample{{
				metric: labels.FromStrings("__name__", "metric_total", "exported_n", "1", "n", "2"),
				v:      0,
			}},
			exemplars: []exemplar.Exemplar{
				{Labels: labels.FromStrings("a", "abc"), Value: 1, Ts: 10000000, HasTs: true},
			},
		},
		{
			title: "Two metrics and exemplars",
			scrapeText: `metric_total{n="1"} 1 # {t="1"} 1.0 10000
metric_total{n="2"} 2 # {t="2"} 2.0 20000
# EOF`,
			samples: []sample{{
				metric: labels.FromStrings("__name__", "metric_total", "n", "1"),
				v:      1,
			}, {
				metric: labels.FromStrings("__name__", "metric_total", "n", "2"),
				v:      2,
			}},
			exemplars: []exemplar.Exemplar{
				{Labels: labels.FromStrings("t", "1"), Value: 1, Ts: 10000000, HasTs: true},
				{Labels: labels.FromStrings("t", "2"), Value: 2, Ts: 20000000, HasTs: true},
			},
		},
	}

	for _, test := range tests {
		t.Run(test.title, func(t *testing.T) {
			app := &collectResultAppender{}

			discoveryLabels := &Target{
				labels: labels.FromStrings(test.discoveryLabels...),
			}

			sl := newScrapeLoop(context.Background(),
				nil, nil, nil,
				func(l labels.Labels) labels.Labels {
					return mutateSampleLabels(l, discoveryLabels, false, nil)
				},
				func(l labels.Labels) labels.Labels {
					return mutateReportSampleLabels(l, discoveryLabels)
				},
				func(ctx context.Context) storage.Appender { return app },
				nil,
				0,
				true,
				0, 0,
				nil,
				0,
				0,
				false,
				false,
				false,
				nil,
				false,
			)

			now := time.Now()

			for i := range test.samples {
				test.samples[i].t = timestamp.FromTime(now)
			}

			// We need to set the timestamp for expected exemplars that don't have one.
			for i := range test.exemplars {
				if test.exemplars[i].Ts == 0 {
					test.exemplars[i].Ts = timestamp.FromTime(now)
				}
			}

			_, _, _, err := sl.append(app, []byte(test.scrapeText), "application/openmetrics-text", now)
			require.NoError(t, err)
			require.NoError(t, app.Commit())
			require.Equal(t, test.samples, app.result)
			require.Equal(t, test.exemplars, app.resultExemplars)
		})
	}
}

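// TestScrapeLoopAppendExemplarSeries checks that consecutive scrapes of the same
// series each ingest their own exemplar.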
func TestScrapeLoopAppendExemplarSeries(t *testing.T) {
	scrapeText := []string{`metric_total{n="1"} 1 # {t="1"} 1.0 10000
# EOF`, `metric_total{n="1"} 2 # {t="2"} 2.0 20000
# EOF`}
	samples := []sample{{
		metric: labels.FromStrings("__name__", "metric_total", "n", "1"),
		v:      1,
	}, {
		metric: labels.FromStrings("__name__", "metric_total", "n", "1"),
		v:      2,
	}}
	exemplars := []exemplar.Exemplar{
		{Labels: labels.FromStrings("t", "1"), Value: 1, Ts: 10000000, HasTs: true},
		{Labels: labels.FromStrings("t", "2"), Value: 2, Ts: 20000000, HasTs: true},
	}
	discoveryLabels := &Target{
		labels: labels.FromStrings(),
	}

	app := &collectResultAppender{}

	sl := newScrapeLoop(context.Background(),
		nil, nil, nil,
		func(l labels.Labels) labels.Labels {
			return mutateSampleLabels(l, discoveryLabels, false, nil)
		},
		func(l labels.Labels) labels.Labels {
			return mutateReportSampleLabels(l, discoveryLabels)
		},
		func(ctx context.Context) storage.Appender { return app },
		nil,
		0,
		true,
		0, 0,
		nil,
		0,
		0,
		false,
		false,
		false,
		nil,
		false,
	)

	now := time.Now()

	for i := range samples {
		ts := now.Add(time.Second * time.Duration(i))
		samples[i].t = timestamp.FromTime(ts)
	}

	// We need to set the timestamp for expected exemplars that don't have one.
	for i := range exemplars {
		if exemplars[i].Ts == 0 {
			ts := now.Add(time.Second * time.Duration(i))
			exemplars[i].Ts = timestamp.FromTime(ts)
		}
	}

	for i, st := range scrapeText {
		_, _, _, err := sl.append(app, []byte(st), "application/openmetrics-text", timestamp.Time(samples[i].t))
		require.NoError(t, err)
		require.NoError(t, app.Commit())
	}

	require.Equal(t, samples, app.result)
	require.Equal(t, exemplars, app.resultExemplars)
}

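// TestScrapeLoopRunReportsTargetDownOnScrapeError checks that a failing scrape
// reports up=0.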
func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) {
	var (
		scraper  = &testScraper{}
		appender = &collectResultAppender{}
		app      = func(ctx context.Context) storage.Appender { return appender }
	)

	ctx, cancel := context.WithCancel(context.Background())
	sl := newScrapeLoop(ctx,
		scraper,
		nil, nil,
		nopMutator,
		nopMutator,
		app,
		nil,
		0,
		true,
		0, 0,
		nil,
		10*time.Millisecond,
		time.Hour,
		false,
		false,
		false,
		nil,
		false,
	)

	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
		cancel()
		return errors.New("scrape failed")
	}

	sl.run(nil)
	require.Equal(t, 0.0, appender.result[0].v, "bad 'up' value")
}

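// TestScrapeLoopRunReportsTargetDownOnInvalidUTF8 checks that a scrape returning
// invalid UTF-8 label values also reports up=0.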
func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
	var (
		scraper  = &testScraper{}
		appender = &collectResultAppender{}
		app      = func(ctx context.Context) storage.Appender { return appender }
	)

	ctx, cancel := context.WithCancel(context.Background())
	sl := newScrapeLoop(ctx,
		scraper,
		nil, nil,
		nopMutator,
		nopMutator,
		app,
		nil,
		0,
		true,
		0, 0,
		nil,
		10*time.Millisecond,
		time.Hour,
		false,
		false,
		false,
		nil,
		false,
	)

	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
		cancel()
		// "\xff" is not a valid UTF-8 sequence, so the scrape must be rejected.
		w.Write([]byte("a{l=\"\xff\"} 1\n"))
		return nil
	}

	sl.run(nil)
	require.Equal(t, 0.0, appender.result[0].v, "bad 'up' value")
}

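// errorAppender fails appends for series whose metric name selects an error
// ("out_of_order", "amend", "out_of_bounds") and delegates everything else to the
// embedded collectResultAppender.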
type errorAppender struct {
	collectResultAppender
}

func (app *errorAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
	switch lset.Get(model.MetricNameLabel) {
	case "out_of_order":
		return 0, storage.ErrOutOfOrderSample
	case "amend":
		return 0, storage.ErrDuplicateSampleForTimestamp
	case "out_of_bounds":
		return 0, storage.ErrOutOfBounds
	default:
		return app.collectResultAppender.Append(ref, lset, t, v)
	}
}

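// TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds checks that
// duplicate, out-of-order, and out-of-bounds samples are dropped without failing
// the whole append.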
func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T) {
	app := &errorAppender{}

	sl := newScrapeLoop(context.Background(),
		nil,
		nil, nil,
		nopMutator,
		nopMutator,
		func(ctx context.Context) storage.Appender { return app },
		nil,
		0,
		true,
		0, 0,
		nil,
		0,
		0,
		false,
		false,
		false,
		nil,
		false,
	)

	now := time.Unix(1, 0)
	slApp := sl.appender(context.Background())
	total, added, seriesAdded, err := sl.append(slApp, []byte("out_of_order 1\namend 1\nnormal 1\nout_of_bounds 1\n"), "", now)
	require.NoError(t, err)
	require.NoError(t, slApp.Commit())

	want := []sample{
		{
			metric: labels.FromStrings(model.MetricNameLabel, "normal"),
			t:      timestamp.FromTime(now),
			v:      1,
		},
	}
	require.Equal(t, want, app.result, "Appended samples not as expected:\n%s", app)
	require.Equal(t, 4, total)
	require.Equal(t, 4, added)
	require.Equal(t, 1, seriesAdded)
}

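// TestScrapeLoopOutOfBoundsTimeError checks that samples too far in the future are
// rejected by the timeLimitAppender without failing the append.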
func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) {
	app := &collectResultAppender{}
	sl := newScrapeLoop(context.Background(),
		nil,
		nil, nil,
		nopMutator,
		nopMutator,
		func(ctx context.Context) storage.Appender {
			return &timeLimitAppender{
				Appender: app,
				maxTime:  timestamp.FromTime(time.Now().Add(10 * time.Minute)),
			}
		},
		nil,
		0,
		true,
		0, 0,
		nil,
		0,
		0,
		false,
		false,
		false,
		nil,
		false,
	)

	now := time.Now().Add(20 * time.Minute)
	slApp := sl.appender(context.Background())
	total, added, seriesAdded, err := sl.append(slApp, []byte("normal 1\n"), "", now)
	require.NoError(t, err)
	require.NoError(t, slApp.Commit())
	require.Equal(t, 1, total)
	require.Equal(t, 1, added)
	require.Equal(t, 0, seriesAdded)
}

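// TestTargetScraperScrapeOK checks the Accept and timeout headers sent by the
// scraper and the returned content type, for both the plain text and the
// protobuf-preferring accept headers.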
func TestTargetScraperScrapeOK(t *testing.T) {
|
2017-04-04 10:26:28 -07:00
|
|
|
const (
|
|
|
|
configTimeout = 1500 * time.Millisecond
|
2021-06-18 00:38:12 -07:00
|
|
|
expectedTimeout = "1.5"
|
2017-04-04 10:26:28 -07:00
|
|
|
)
|
|
|
|
|
2022-10-12 00:48:25 -07:00
|
|
|
var protobufParsing bool
|
|
|
|
|
2016-02-28 14:59:03 -08:00
|
|
|
server := httptest.NewServer(
|
|
|
|
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
2022-10-12 00:48:25 -07:00
|
|
|
if protobufParsing {
|
|
|
|
accept := r.Header.Get("Accept")
|
|
|
|
if !strings.HasPrefix(accept, "application/vnd.google.protobuf;") {
|
|
|
|
t.Errorf("Expected Accept header to prefer application/vnd.google.protobuf, got %q", accept)
|
|
|
|
}
|
2017-09-22 09:06:43 -07:00
|
|
|
}
|
|
|
|
|
2017-04-05 11:56:22 -07:00
|
|
|
timeout := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds")
|
2017-04-04 10:26:28 -07:00
|
|
|
if timeout != expectedTimeout {
|
2017-09-22 09:06:43 -07:00
|
|
|
t.Errorf("Expected scrape timeout header %q, got %q", expectedTimeout, timeout)
|
2017-04-04 10:26:28 -07:00
|
|
|
}
|
|
|
|
|
2016-02-28 14:59:03 -08:00
|
|
|
w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
|
|
|
|
w.Write([]byte("metric_a 1\nmetric_b 2\n"))
|
|
|
|
}),
|
|
|
|
)
|
|
|
|
defer server.Close()
|
|
|
|
|
|
|
|
serverURL, err := url.Parse(server.URL)
|
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
|
2022-10-12 00:48:25 -07:00
|
|
|
runTest := func(acceptHeader string) {
|
|
|
|
ts := &targetScraper{
|
|
|
|
Target: &Target{
|
|
|
|
labels: labels.FromStrings(
|
|
|
|
model.SchemeLabel, serverURL.Scheme,
|
|
|
|
model.AddressLabel, serverURL.Host,
|
|
|
|
),
|
|
|
|
},
|
|
|
|
client: http.DefaultClient,
|
|
|
|
timeout: configTimeout,
|
|
|
|
acceptHeader: acceptHeader,
|
|
|
|
}
|
|
|
|
var buf bytes.Buffer
|
|
|
|
|
|
|
|
contentType, err := ts.scrape(context.Background(), &buf)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, "text/plain; version=0.0.4", contentType)
|
|
|
|
require.Equal(t, "metric_a 1\nmetric_b 2\n", buf.String())
|
2016-02-28 14:59:03 -08:00
|
|
|
}
|
|
|
|
|
2022-10-12 00:48:25 -07:00
|
|
|
runTest(scrapeAcceptHeader)
|
|
|
|
protobufParsing = true
|
|
|
|
runTest(scrapeAcceptHeaderWithProtobuf)
|
2016-02-28 14:59:03 -08:00
|
|
|
}
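
// TestTargetScrapeScrapeCancel checks that cancelling the scrape context aborts
// an in-flight HTTP scrape promptly instead of blocking on the server.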
func TestTargetScrapeScrapeCancel(t *testing.T) {
	block := make(chan struct{})

	server := httptest.NewServer(
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			<-block
		}),
	)
	defer server.Close()

	serverURL, err := url.Parse(server.URL)
	if err != nil {
		panic(err)
	}

	ts := &targetScraper{
		Target: &Target{
			labels: labels.FromStrings(
				model.SchemeLabel, serverURL.Scheme,
				model.AddressLabel, serverURL.Host,
			),
		},
		client:       http.DefaultClient,
		acceptHeader: scrapeAcceptHeader,
	}
	ctx, cancel := context.WithCancel(context.Background())

	errc := make(chan error, 1)

	go func() {
		time.Sleep(1 * time.Second)
		cancel()
	}()

	go func() {
		_, err := ts.scrape(ctx, io.Discard)
		switch {
		case err == nil:
			errc <- errors.New("Expected error but got nil")
		case ctx.Err() != context.Canceled:
			errc <- errors.Errorf("Expected context cancellation error but got: %s", ctx.Err())
		default:
			close(errc)
		}
	}()

	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("Scrape function did not return in time")
	case err := <-errc:
		require.NoError(t, err)
	}

	// If this were closed in a defer above, the test server wouldn't
	// terminate and the test wouldn't complete.
	close(block)
}
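
// TestTargetScrapeScrapeNotFound checks that a non-2xx response (here 404) is
// surfaced as a scrape error.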
func TestTargetScrapeScrapeNotFound(t *testing.T) {
	server := httptest.NewServer(
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusNotFound)
		}),
	)
	defer server.Close()

	serverURL, err := url.Parse(server.URL)
	if err != nil {
		panic(err)
	}

	ts := &targetScraper{
		Target: &Target{
			labels: labels.FromStrings(
				model.SchemeLabel, serverURL.Scheme,
				model.AddressLabel, serverURL.Host,
			),
		},
		client:       http.DefaultClient,
		acceptHeader: scrapeAcceptHeader,
	}

	_, err = ts.scrape(context.Background(), io.Discard)
	require.Contains(t, err.Error(), "404", "Expected \"404 Not Found\" error but got: %s", err)
}
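
// TestTargetScraperBodySizeLimit exercises bodySizeLimit for plain and
// gzip-compressed responses: with a limit set, the scrape fails with
// errBodySizeLimit and reads at most the limit; without one, the full body is read.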
func TestTargetScraperBodySizeLimit(t *testing.T) {
	const (
		bodySizeLimit = 15
		responseBody  = "metric_a 1\nmetric_b 2\n"
	)
	var gzipResponse bool
	server := httptest.NewServer(
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
			if gzipResponse {
				w.Header().Set("Content-Encoding", "gzip")
				gw := gzip.NewWriter(w)
				defer gw.Close()
				gw.Write([]byte(responseBody))
				return
			}
			w.Write([]byte(responseBody))
		}),
	)
	defer server.Close()

	serverURL, err := url.Parse(server.URL)
	if err != nil {
		panic(err)
	}

	ts := &targetScraper{
		Target: &Target{
			labels: labels.FromStrings(
				model.SchemeLabel, serverURL.Scheme,
				model.AddressLabel, serverURL.Host,
			),
		},
		client:        http.DefaultClient,
		bodySizeLimit: bodySizeLimit,
		acceptHeader:  scrapeAcceptHeader,
	}
	var buf bytes.Buffer

	// Target responds with an uncompressed body; scrape with body size limit.
	_, err = ts.scrape(context.Background(), &buf)
	require.ErrorIs(t, err, errBodySizeLimit)
	require.Equal(t, bodySizeLimit, buf.Len())
	// Target responds with a gzip-compressed body; scrape with body size limit.
	gzipResponse = true
	buf.Reset()
	_, err = ts.scrape(context.Background(), &buf)
	require.ErrorIs(t, err, errBodySizeLimit)
	require.Equal(t, bodySizeLimit, buf.Len())
	// Target responds with an uncompressed body; scrape without body size limit.
	gzipResponse = false
	buf.Reset()
	ts.bodySizeLimit = 0
	_, err = ts.scrape(context.Background(), &buf)
	require.NoError(t, err)
	require.Equal(t, len(responseBody), buf.Len())
	// Target responds with a gzip-compressed body; scrape without body size limit.
	gzipResponse = true
	buf.Reset()
	_, err = ts.scrape(context.Background(), &buf)
	require.NoError(t, err)
	require.Equal(t, len(responseBody), buf.Len())
}

// testScraper implements the scraper interface and allows setting values
// returned by its methods. It also allows setting a custom scrape function.
type testScraper struct {
	offsetDur time.Duration

	lastStart    time.Time
	lastDuration time.Duration
	lastError    error

	scrapeErr  error
	scrapeFunc func(context.Context, io.Writer) error
}

func (ts *testScraper) offset(time.Duration, uint64) time.Duration {
	return ts.offsetDur
}

func (ts *testScraper) Report(start time.Time, duration time.Duration, err error) {
	ts.lastStart = start
	ts.lastDuration = duration
	ts.lastError = err
}

func (ts *testScraper) scrape(ctx context.Context, w io.Writer) (string, error) {
	if ts.scrapeFunc != nil {
		return "", ts.scrapeFunc(ctx, w)
	}
	return "", ts.scrapeErr
}
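
// TestScrapeLoop_RespectTimestamps checks that with honor timestamps enabled,
// the timestamp from the exposition (here 0) is kept as-is.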
func TestScrapeLoop_RespectTimestamps(t *testing.T) {
	s := teststorage.New(t)
	defer s.Close()

	app := s.Appender(context.Background())

	capp := &collectResultAppender{next: app}

	sl := newScrapeLoop(context.Background(),
		nil, nil, nil,
		nopMutator,
		nopMutator,
		func(ctx context.Context) storage.Appender { return capp },
		nil, 0,
		true,
		0, 0,
		nil,
		0,
		0,
		false,
		false,
		false,
		nil,
		false,
	)

	now := time.Now()
	slApp := sl.appender(context.Background())
	_, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1 0`), "", now)
	require.NoError(t, err)
	require.NoError(t, slApp.Commit())

	want := []sample{
		{
			metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
			t:      0,
			v:      1,
		},
	}
	require.Equal(t, want, capp.result, "Appended samples not as expected")
}
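
// TestScrapeLoop_DiscardTimestamps checks that with honor timestamps disabled,
// the exposed timestamp is replaced by the scrape time.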
func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
	s := teststorage.New(t)
	defer s.Close()

	app := s.Appender(context.Background())

	capp := &collectResultAppender{next: app}

	sl := newScrapeLoop(context.Background(),
		nil, nil, nil,
		nopMutator,
		nopMutator,
		func(ctx context.Context) storage.Appender { return capp },
		nil, 0,
		false,
		0, 0,
		nil,
		0,
		0,
		false,
		false,
		false,
		nil,
		false,
	)

	now := time.Now()
	slApp := sl.appender(context.Background())
	_, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1 0`), "", now)
	require.NoError(t, err)
	require.NoError(t, slApp.Commit())

	want := []sample{
		{
			metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
			t:      timestamp.FromTime(now),
			v:      1,
		},
	}
	require.Equal(t, want, capp.result, "Appended samples not as expected")
}
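
// TestScrapeLoopDiscardDuplicateLabels verifies that a sample carrying a
// duplicated label name fails the whole append and leaves nothing in the TSDB,
// while a subsequent clean scrape is stored.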
func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
	s := teststorage.New(t)
	defer s.Close()

	ctx, cancel := context.WithCancel(context.Background())
	sl := newScrapeLoop(ctx,
		&testScraper{},
		nil, nil,
		nopMutator,
		nopMutator,
		s.Appender,
		nil,
		0,
		true,
		0, 0,
		nil,
		0,
		0,
		false,
		false,
		false,
		nil,
		false,
	)
	defer cancel()

	// We add a good and a bad metric to check that both are discarded.
	slApp := sl.appender(ctx)
	_, _, _, err := sl.append(slApp, []byte("test_metric{le=\"500\"} 1\ntest_metric{le=\"600\",le=\"700\"} 1\n"), "", time.Time{})
	require.Error(t, err)
	require.NoError(t, slApp.Rollback())

	q, err := s.Querier(ctx, time.Time{}.UnixNano(), 0)
	require.NoError(t, err)
	series := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
	require.Equal(t, false, series.Next(), "series found in tsdb")
	require.NoError(t, series.Err())

	// We add a good metric to check that it is recorded.
	slApp = sl.appender(ctx)
	_, _, _, err = sl.append(slApp, []byte("test_metric{le=\"500\"} 1\n"), "", time.Time{})
	require.NoError(t, err)
	require.NoError(t, slApp.Commit())

	q, err = s.Querier(ctx, time.Time{}.UnixNano(), 0)
	require.NoError(t, err)
	series = q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "le", "500"))
	require.Equal(t, true, series.Next(), "series not found in tsdb")
	require.NoError(t, series.Err())
	require.Equal(t, false, series.Next(), "more than one series found in tsdb")
}
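
// TestScrapeLoopDiscardUnnamedMetrics verifies that a mutator dropping the
// __name__ label makes the append fail with errNameLabelMandatory and that
// nothing reaches the TSDB.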
func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) {
	s := teststorage.New(t)
	defer s.Close()

	app := s.Appender(context.Background())

	ctx, cancel := context.WithCancel(context.Background())
	sl := newScrapeLoop(context.Background(),
		&testScraper{},
		nil, nil,
		func(l labels.Labels) labels.Labels {
			if l.Has("drop") {
				return labels.FromStrings("no", "name") // This label set will trigger an error.
			}
			return l
		},
		nopMutator,
		func(ctx context.Context) storage.Appender { return app },
		nil,
		0,
		true,
		0, 0,
		nil,
		0,
		0,
		false,
		false,
		false,
		nil,
		false,
	)
	defer cancel()

	slApp := sl.appender(context.Background())
	_, _, _, err := sl.append(slApp, []byte("nok 1\nnok2{drop=\"drop\"} 1\n"), "", time.Time{})
	require.Error(t, err)
	require.NoError(t, slApp.Rollback())
	require.Equal(t, errNameLabelMandatory, err)

	q, err := s.Querier(ctx, time.Time{}.UnixNano(), 0)
	require.NoError(t, err)
	series := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
	require.Equal(t, false, series.Next(), "series found in tsdb")
	require.NoError(t, series.Err())
}
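
// TestReusableConfig enumerates pairs of scrape configs and asserts, in both
// directions, which ones reusableCache considers cache-compatible.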
func TestReusableConfig(t *testing.T) {
	variants := []*config.ScrapeConfig{
		{
			JobName:       "prometheus",
			ScrapeTimeout: model.Duration(15 * time.Second),
		},
		{
			JobName:       "httpd",
			ScrapeTimeout: model.Duration(15 * time.Second),
		},
		{
			JobName:       "prometheus",
			ScrapeTimeout: model.Duration(5 * time.Second),
		},
		{
			JobName:     "prometheus",
			MetricsPath: "/metrics",
		},
		{
			JobName:     "prometheus",
			MetricsPath: "/metrics2",
		},
		{
			JobName:       "prometheus",
			ScrapeTimeout: model.Duration(5 * time.Second),
			MetricsPath:   "/metrics2",
		},
		{
			JobName:        "prometheus",
			ScrapeInterval: model.Duration(5 * time.Second),
			MetricsPath:    "/metrics2",
		},
		{
			JobName:        "prometheus",
			ScrapeInterval: model.Duration(5 * time.Second),
			SampleLimit:    1000,
			MetricsPath:    "/metrics2",
		},
	}

	match := [][]int{
		{0, 2},
		{4, 5},
		{4, 6},
		{4, 7},
		{5, 6},
		{5, 7},
		{6, 7},
	}
	noMatch := [][]int{
		{1, 2},
		{0, 4},
		{3, 4},
	}

	for i, m := range match {
		require.Equal(t, true, reusableCache(variants[m[0]], variants[m[1]]), "match test %d", i)
		require.Equal(t, true, reusableCache(variants[m[1]], variants[m[0]]), "match test %d", i)
		require.Equal(t, true, reusableCache(variants[m[1]], variants[m[1]]), "match test %d", i)
		require.Equal(t, true, reusableCache(variants[m[0]], variants[m[0]]), "match test %d", i)
	}
	for i, m := range noMatch {
		require.Equal(t, false, reusableCache(variants[m[0]], variants[m[1]]), "not match test %d", i)
		require.Equal(t, false, reusableCache(variants[m[1]], variants[m[0]]), "not match test %d", i)
	}
}
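
// TestReuseScrapeCache reloads a scrape pool with a series of configs and
// asserts that each loop's scrape cache survives reloads that only change
// limits, is invalidated when scrape semantics change, and is never
// invalidated by reloading the identical config.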
func TestReuseScrapeCache(t *testing.T) {
	var (
		app = &nopAppendable{}
		cfg = &config.ScrapeConfig{
			JobName:        "Prometheus",
			ScrapeTimeout:  model.Duration(5 * time.Second),
			ScrapeInterval: model.Duration(5 * time.Second),
			MetricsPath:    "/metrics",
		}
		sp, _ = newScrapePool(cfg, app, 0, nil, &Options{})
		t1    = &Target{
			discoveredLabels: labels.FromStrings("labelNew", "nameNew", "labelNew1", "nameNew1", "labelNew2", "nameNew2"),
		}
		proxyURL, _ = url.Parse("http://localhost:2128")
	)
	defer sp.stop()
	sp.sync([]*Target{t1})

	steps := []struct {
		keep      bool
		newConfig *config.ScrapeConfig
	}{
		{
			keep: true,
			newConfig: &config.ScrapeConfig{
				JobName:        "Prometheus",
				ScrapeInterval: model.Duration(5 * time.Second),
				ScrapeTimeout:  model.Duration(5 * time.Second),
				MetricsPath:    "/metrics",
			},
		},
		{
			keep: false,
			newConfig: &config.ScrapeConfig{
				JobName:        "Prometheus",
				ScrapeInterval: model.Duration(5 * time.Second),
				ScrapeTimeout:  model.Duration(15 * time.Second),
				MetricsPath:    "/metrics2",
			},
		},
		{
			keep: true,
			newConfig: &config.ScrapeConfig{
				JobName:        "Prometheus",
				SampleLimit:    400,
				ScrapeInterval: model.Duration(5 * time.Second),
				ScrapeTimeout:  model.Duration(15 * time.Second),
				MetricsPath:    "/metrics2",
			},
		},
		{
			keep: false,
			newConfig: &config.ScrapeConfig{
				JobName:         "Prometheus",
				HonorTimestamps: true,
				SampleLimit:     400,
				ScrapeInterval:  model.Duration(5 * time.Second),
				ScrapeTimeout:   model.Duration(15 * time.Second),
				MetricsPath:     "/metrics2",
			},
		},
		{
			keep: true,
			newConfig: &config.ScrapeConfig{
				JobName:         "Prometheus",
				HonorTimestamps: true,
				SampleLimit:     400,
				HTTPClientConfig: config_util.HTTPClientConfig{
					ProxyConfig: config_util.ProxyConfig{ProxyURL: config_util.URL{URL: proxyURL}},
				},
				ScrapeInterval: model.Duration(5 * time.Second),
				ScrapeTimeout:  model.Duration(15 * time.Second),
				MetricsPath:    "/metrics2",
			},
		},
		{
			keep: false,
			newConfig: &config.ScrapeConfig{
				JobName:         "Prometheus",
				HonorTimestamps: true,
				HonorLabels:     true,
				SampleLimit:     400,
				ScrapeInterval:  model.Duration(5 * time.Second),
				ScrapeTimeout:   model.Duration(15 * time.Second),
				MetricsPath:     "/metrics2",
			},
		},
		{
			keep: false,
			newConfig: &config.ScrapeConfig{
				JobName:        "Prometheus",
				ScrapeInterval: model.Duration(5 * time.Second),
				ScrapeTimeout:  model.Duration(15 * time.Second),
				MetricsPath:    "/metrics",
				LabelLimit:     1,
			},
		},
		{
			keep: false,
			newConfig: &config.ScrapeConfig{
				JobName:        "Prometheus",
				ScrapeInterval: model.Duration(5 * time.Second),
				ScrapeTimeout:  model.Duration(15 * time.Second),
				MetricsPath:    "/metrics",
				LabelLimit:     15,
			},
		},
		{
			keep: false,
			newConfig: &config.ScrapeConfig{
				JobName:              "Prometheus",
				ScrapeInterval:       model.Duration(5 * time.Second),
				ScrapeTimeout:        model.Duration(15 * time.Second),
				MetricsPath:          "/metrics",
				LabelLimit:           15,
				LabelNameLengthLimit: 5,
			},
		},
		{
			keep: false,
			newConfig: &config.ScrapeConfig{
				JobName:               "Prometheus",
				ScrapeInterval:        model.Duration(5 * time.Second),
				ScrapeTimeout:         model.Duration(15 * time.Second),
				MetricsPath:           "/metrics",
				LabelLimit:            15,
				LabelNameLengthLimit:  5,
				LabelValueLengthLimit: 7,
			},
		},
	}

	cacheAddr := func(sp *scrapePool) map[uint64]string {
		r := make(map[uint64]string)
		for fp, l := range sp.loops {
			r[fp] = fmt.Sprintf("%p", l.getCache())
		}
		return r
	}

	for i, s := range steps {
		initCacheAddr := cacheAddr(sp)
		sp.reload(s.newConfig)
		for fp, newCacheAddr := range cacheAddr(sp) {
			if s.keep {
				require.Equal(t, initCacheAddr[fp], newCacheAddr, "step %d: old cache and new cache are not the same", i)
			} else {
				require.NotEqual(t, initCacheAddr[fp], newCacheAddr, "step %d: old cache and new cache are the same", i)
			}
		}
		initCacheAddr = cacheAddr(sp)
		sp.reload(s.newConfig)
		for fp, newCacheAddr := range cacheAddr(sp) {
			require.Equal(t, initCacheAddr[fp], newCacheAddr, "step %d: reloading the exact config invalidates the cache", i)
		}
	}
}
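
// TestScrapeAddFast poisons the cached series refs and checks that an append
// hitting a stale ref falls back gracefully instead of failing.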
func TestScrapeAddFast(t *testing.T) {
	s := teststorage.New(t)
	defer s.Close()

	ctx, cancel := context.WithCancel(context.Background())
	sl := newScrapeLoop(ctx,
		&testScraper{},
		nil, nil,
		nopMutator,
		nopMutator,
		s.Appender,
		nil,
		0,
		true,
		0, 0,
		nil,
		0,
		0,
		false,
		false,
		false,
		nil,
		false,
	)
	defer cancel()

	slApp := sl.appender(ctx)
	_, _, _, err := sl.append(slApp, []byte("up 1\n"), "", time.Time{})
	require.NoError(t, err)
	require.NoError(t, slApp.Commit())

	// Poison the cache. There is just one entry, and one series in the
	// storage. Changing the ref will create a 'not found' error.
	for _, v := range sl.getCache().series {
		v.ref++
	}

	slApp = sl.appender(ctx)
	_, _, _, err = sl.append(slApp, []byte("up 1\n"), "", time.Time{}.Add(time.Second))
	require.NoError(t, err)
	require.NoError(t, slApp.Commit())
}
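
// TestReuseCacheRace stresses cache reuse across rapid reloads; it has no
// assertions and only needs to stay quiet under the race detector.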
func TestReuseCacheRace(*testing.T) {
	var (
		app = &nopAppendable{}
		cfg = &config.ScrapeConfig{
			JobName:        "Prometheus",
			ScrapeTimeout:  model.Duration(5 * time.Second),
			ScrapeInterval: model.Duration(5 * time.Second),
			MetricsPath:    "/metrics",
		}
		sp, _ = newScrapePool(cfg, app, 0, nil, &Options{})
		t1    = &Target{
			discoveredLabels: labels.FromStrings("labelNew", "nameNew"),
		}
	)
	defer sp.stop()
	sp.sync([]*Target{t1})

	start := time.Now()
	// Reload in a tight loop for five seconds; the changing SampleLimit makes
	// every iteration apply a new config while the loops keep running.
	for i := uint(1); i > 0; i++ {
		if time.Since(start) > 5*time.Second {
			break
		}
		sp.reload(&config.ScrapeConfig{
			JobName:        "Prometheus",
			ScrapeTimeout:  model.Duration(1 * time.Millisecond),
			ScrapeInterval: model.Duration(1 * time.Millisecond),
			MetricsPath:    "/metrics",
			SampleLimit:    i,
		})
	}
}
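
// TestCheckAddError checks that checkAddError counts an out-of-order sample in
// appendErrors.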
func TestCheckAddError(t *testing.T) {
	var appErrs appendErrors
	sl := scrapeLoop{l: log.NewNopLogger()}
	sl.checkAddError(nil, nil, nil, storage.ErrOutOfOrderSample, nil, nil, &appErrs)
	require.Equal(t, 1, appErrs.numOutOfOrder)
}
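
// TestScrapeReportSingleAppender asserts that scraped samples and report series
// of one scrape go through a single appender: at any point the stored sample
// count is a multiple of nine (the four scraped metrics plus, presumably, five
// report series), which a partially committed scrape would break.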
func TestScrapeReportSingleAppender(t *testing.T) {
	s := teststorage.New(t)
	defer s.Close()

	var (
		signal  = make(chan struct{}, 1)
		scraper = &testScraper{}
	)

	ctx, cancel := context.WithCancel(context.Background())
	sl := newScrapeLoop(ctx,
		scraper,
		nil, nil,
		nopMutator,
		nopMutator,
		s.Appender,
		nil,
		0,
		true,
		0, 0,
		nil,
		10*time.Millisecond,
		time.Hour,
		false,
		false,
		false,
		nil,
		false,
	)

	numScrapes := 0

	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
		numScrapes++
		if numScrapes%4 == 0 {
			return fmt.Errorf("scrape failed")
		}
		w.Write([]byte("metric_a 44\nmetric_b 44\nmetric_c 44\nmetric_d 44\n"))
		return nil
	}

	go func() {
		sl.run(nil)
		signal <- struct{}{}
	}()

	start := time.Now()
	for time.Since(start) < 3*time.Second {
		q, err := s.Querier(ctx, time.Time{}.UnixNano(), time.Now().UnixNano())
		require.NoError(t, err)
		series := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".+"))

		c := 0
		for series.Next() {
			i := series.At().Iterator(nil)
			for i.Next() != chunkenc.ValNone {
				c++
			}
		}

		require.Equal(t, 0, c%9, "Appended samples not as expected: %d", c)
		q.Close()
	}
	cancel()

	select {
	case <-signal:
	case <-time.After(5 * time.Second):
		t.Fatalf("Scrape wasn't stopped.")
	}
}
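
// TestScrapeReportLimit checks that, with a sample_limit configured, report
// series such as up are still appended: up must be 1 after two scrapes.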
func TestScrapeReportLimit(t *testing.T) {
	s := teststorage.New(t)
	defer s.Close()

	cfg := &config.ScrapeConfig{
		JobName:        "test",
		SampleLimit:    5,
		Scheme:         "http",
		ScrapeInterval: model.Duration(100 * time.Millisecond),
		ScrapeTimeout:  model.Duration(100 * time.Millisecond),
	}

	var (
		scrapes      int
		scrapedTwice = make(chan bool)
	)

	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "metric_a 44\nmetric_b 44\nmetric_c 44\nmetric_d 44\n")
		scrapes++
		if scrapes == 2 {
			close(scrapedTwice)
		}
	}))
	defer ts.Close()

	sp, err := newScrapePool(cfg, s, 0, nil, &Options{})
	require.NoError(t, err)
	defer sp.stop()

	testURL, err := url.Parse(ts.URL)
	require.NoError(t, err)
	sp.Sync([]*targetgroup.Group{
		{
			Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(testURL.Host)}},
		},
	})

	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("target was not scraped twice")
	case <-scrapedTwice:
		// If the target has been scraped twice, report samples from the first
		// scrape have been inserted in the database.
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	q, err := s.Querier(ctx, time.Time{}.UnixNano(), time.Now().UnixNano())
	require.NoError(t, err)
	defer q.Close()
	series := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "up"))

	var found bool
	for series.Next() {
		i := series.At().Iterator(nil)
		for i.Next() == chunkenc.ValFloat {
			_, v := i.At()
			require.Equal(t, 1.0, v)
			found = true
		}
	}

	require.True(t, found)
}
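
// TestScrapeLoopLabelLimit covers label_limit, label_name_length_limit and
// label_value_length_limit for both scraped and discovered labels.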
func TestScrapeLoopLabelLimit(t *testing.T) {
	tests := []struct {
		title           string
		scrapeLabels    string
		discoveryLabels []string
		labelLimits     labelLimits
		expectErr       bool
	}{
		{
			title:           "Valid number of labels",
			scrapeLabels:    `metric{l1="1", l2="2"} 0`,
			discoveryLabels: nil,
			labelLimits:     labelLimits{labelLimit: 5},
			expectErr:       false,
		}, {
			title:           "Too many labels",
			scrapeLabels:    `metric{l1="1", l2="2", l3="3", l4="4", l5="5", l6="6"} 0`,
			discoveryLabels: nil,
			labelLimits:     labelLimits{labelLimit: 5},
			expectErr:       true,
		}, {
			title:           "Too many labels including discovery labels",
			scrapeLabels:    `metric{l1="1", l2="2", l3="3", l4="4"} 0`,
			discoveryLabels: []string{"l5", "5", "l6", "6"},
			labelLimits:     labelLimits{labelLimit: 5},
			expectErr:       true,
		}, {
			title:           "Valid labels name length",
			scrapeLabels:    `metric{l1="1", l2="2"} 0`,
			discoveryLabels: nil,
			labelLimits:     labelLimits{labelNameLengthLimit: 10},
			expectErr:       false,
		}, {
			title:           "Label name too long",
			scrapeLabels:    `metric{label_name_too_long="0"} 0`,
			discoveryLabels: nil,
			labelLimits:     labelLimits{labelNameLengthLimit: 10},
			expectErr:       true,
		}, {
			title:           "Discovery label name too long",
			scrapeLabels:    `metric{l1="1", l2="2"} 0`,
			discoveryLabels: []string{"label_name_too_long", "0"},
			labelLimits:     labelLimits{labelNameLengthLimit: 10},
			expectErr:       true,
		}, {
			title:           "Valid labels value length",
			scrapeLabels:    `metric{l1="1", l2="2"} 0`,
			discoveryLabels: nil,
			labelLimits:     labelLimits{labelValueLengthLimit: 10},
			expectErr:       false,
		}, {
			title:           "Label value too long",
			scrapeLabels:    `metric{l1="label_value_too_long"} 0`,
			discoveryLabels: nil,
			labelLimits:     labelLimits{labelValueLengthLimit: 10},
			expectErr:       true,
		}, {
			title:           "Discovery label value too long",
			scrapeLabels:    `metric{l1="1", l2="2"} 0`,
			discoveryLabels: []string{"l1", "label_value_too_long"},
			labelLimits:     labelLimits{labelValueLengthLimit: 10},
			expectErr:       true,
		},
	}

	for _, test := range tests {
		app := &collectResultAppender{}

		discoveryLabels := &Target{
			labels: labels.FromStrings(test.discoveryLabels...),
		}

		sl := newScrapeLoop(context.Background(),
			nil, nil, nil,
			func(l labels.Labels) labels.Labels {
				return mutateSampleLabels(l, discoveryLabels, false, nil)
			},
			func(l labels.Labels) labels.Labels {
				return mutateReportSampleLabels(l, discoveryLabels)
			},
			func(ctx context.Context) storage.Appender { return app },
			nil,
			0,
			true,
			0, 0,
			&test.labelLimits,
			0,
			0,
			false,
			false,
			false,
			nil,
			false,
		)

		slApp := sl.appender(context.Background())
		_, _, _, err := sl.append(slApp, []byte(test.scrapeLabels), "", time.Now())

		t.Logf("Test: %s", test.title)
		if test.expectErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			require.NoError(t, slApp.Commit())
		}
	}
}
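
// TestTargetScrapeIntervalAndTimeoutRelabel checks that __scrape_interval__ and
// __scrape_timeout__ can be rewritten by target relabeling and show up on the
// active target.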
func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) {
	interval, _ := model.ParseDuration("2s")
	timeout, _ := model.ParseDuration("500ms")
	config := &config.ScrapeConfig{
		ScrapeInterval: interval,
		ScrapeTimeout:  timeout,
		RelabelConfigs: []*relabel.Config{
			{
				SourceLabels: model.LabelNames{model.ScrapeIntervalLabel},
				Regex:        relabel.MustNewRegexp("2s"),
				Replacement:  "3s",
				TargetLabel:  model.ScrapeIntervalLabel,
				Action:       relabel.Replace,
			},
			{
				SourceLabels: model.LabelNames{model.ScrapeTimeoutLabel},
				Regex:        relabel.MustNewRegexp("500ms"),
				Replacement:  "750ms",
				TargetLabel:  model.ScrapeTimeoutLabel,
				Action:       relabel.Replace,
			},
		},
	}
	sp, _ := newScrapePool(config, &nopAppendable{}, 0, nil, &Options{})
	tgts := []*targetgroup.Group{
		{
			Targets: []model.LabelSet{{model.AddressLabel: "127.0.0.1:9090"}},
		},
	}

	sp.Sync(tgts)
	defer sp.stop()

	require.Equal(t, "3s", sp.ActiveTargets()[0].labels.Get(model.ScrapeIntervalLabel))
	require.Equal(t, "750ms", sp.ActiveTargets()[0].labels.Get(model.ScrapeTimeoutLabel))
}