mirror of
https://github.com/prometheus/prometheus.git
synced 2025-03-05 20:59:13 -08:00
To experiment with the Prometheus pull method on semi-short-lived serverless environments we (Google Cloud) want to attempt init and shutdown scrapes to have some basic data from short-lived workloads. This makes little sense for the Prometheus scrape manager, but it might be useful for external importers like OpenTelemetry and distributions, which can be deployed in various configurations (e.g. sidecar with just a single target). It's up to us to merge it or close. I would be fine maintaining on upstream, but we might as well keep it in our fork to experiment on this--and perhaps merge once official contrib Otel decides to use those options. NOTE: Also added a high-level scrape manager test. I think it was kind of bad we never had a test integrating all scrape manager pieces. Can add that in a separate PR as well. Alternatives attempted: * Manager Option for scrape on shutdown. This was not trivial due to 3 different contexts we pass. We would need to disconnect them from the parent context (sometimes) for scrapeAndReport. Intrusive and prone to mistaken cancellations. * ForceScrape method. This is not trivial as the scrape would need to be additionally locked. Plus the semantics of what to do after (continue interval?) are not clear. We can scope this problem down to stopAndScrape semantics. Signed-off-by: bwplotka <bwplotka@gmail.com>
836 lines
24 KiB
Go
836 lines
24 KiB
Go
// Copyright 2013 The Prometheus Authors
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
// you may not use this file except in compliance with the License.
|
|
// You may obtain a copy of the License at
|
|
//
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
//
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
// See the License for the specific language governing permissions and
|
|
// limitations under the License.
|
|
|
|
package scrape
|
|
|
|
import (
|
|
"context"
|
|
"errors"
|
|
"net/http"
|
|
"net/http/httptest"
|
|
"net/url"
|
|
"os"
|
|
"strconv"
|
|
"testing"
|
|
"time"
|
|
|
|
"github.com/go-kit/log"
|
|
"github.com/prometheus/common/model"
|
|
"github.com/stretchr/testify/require"
|
|
"gopkg.in/yaml.v2"
|
|
|
|
"github.com/prometheus/prometheus/config"
|
|
"github.com/prometheus/prometheus/discovery"
|
|
"github.com/prometheus/prometheus/discovery/targetgroup"
|
|
"github.com/prometheus/prometheus/model/labels"
|
|
"github.com/prometheus/prometheus/model/relabel"
|
|
"github.com/prometheus/prometheus/util/runutil"
|
|
)
|
|
|
|
func TestPopulateLabels(t *testing.T) {
|
|
cases := []struct {
|
|
in labels.Labels
|
|
cfg *config.ScrapeConfig
|
|
noDefaultPort bool
|
|
res labels.Labels
|
|
resOrig labels.Labels
|
|
err string
|
|
}{
|
|
// Regular population of scrape config options.
|
|
{
|
|
in: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "1.2.3.4:1000",
|
|
"custom": "value",
|
|
}),
|
|
cfg: &config.ScrapeConfig{
|
|
Scheme: "https",
|
|
MetricsPath: "/metrics",
|
|
JobName: "job",
|
|
ScrapeInterval: model.Duration(time.Second),
|
|
ScrapeTimeout: model.Duration(time.Second),
|
|
},
|
|
res: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "1.2.3.4:1000",
|
|
model.InstanceLabel: "1.2.3.4:1000",
|
|
model.SchemeLabel: "https",
|
|
model.MetricsPathLabel: "/metrics",
|
|
model.JobLabel: "job",
|
|
model.ScrapeIntervalLabel: "1s",
|
|
model.ScrapeTimeoutLabel: "1s",
|
|
"custom": "value",
|
|
}),
|
|
resOrig: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "1.2.3.4:1000",
|
|
model.SchemeLabel: "https",
|
|
model.MetricsPathLabel: "/metrics",
|
|
model.JobLabel: "job",
|
|
"custom": "value",
|
|
model.ScrapeIntervalLabel: "1s",
|
|
model.ScrapeTimeoutLabel: "1s",
|
|
}),
|
|
},
|
|
// Pre-define/overwrite scrape config labels.
|
|
// Leave out port and expect it to be defaulted to scheme.
|
|
{
|
|
in: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "1.2.3.4",
|
|
model.SchemeLabel: "http",
|
|
model.MetricsPathLabel: "/custom",
|
|
model.JobLabel: "custom-job",
|
|
model.ScrapeIntervalLabel: "2s",
|
|
model.ScrapeTimeoutLabel: "2s",
|
|
}),
|
|
cfg: &config.ScrapeConfig{
|
|
Scheme: "https",
|
|
MetricsPath: "/metrics",
|
|
JobName: "job",
|
|
ScrapeInterval: model.Duration(time.Second),
|
|
ScrapeTimeout: model.Duration(time.Second),
|
|
},
|
|
res: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "1.2.3.4:80",
|
|
model.InstanceLabel: "1.2.3.4:80",
|
|
model.SchemeLabel: "http",
|
|
model.MetricsPathLabel: "/custom",
|
|
model.JobLabel: "custom-job",
|
|
model.ScrapeIntervalLabel: "2s",
|
|
model.ScrapeTimeoutLabel: "2s",
|
|
}),
|
|
resOrig: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "1.2.3.4",
|
|
model.SchemeLabel: "http",
|
|
model.MetricsPathLabel: "/custom",
|
|
model.JobLabel: "custom-job",
|
|
model.ScrapeIntervalLabel: "2s",
|
|
model.ScrapeTimeoutLabel: "2s",
|
|
}),
|
|
},
|
|
// Provide instance label. HTTPS port default for IPv6.
|
|
{
|
|
in: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "[::1]",
|
|
model.InstanceLabel: "custom-instance",
|
|
}),
|
|
cfg: &config.ScrapeConfig{
|
|
Scheme: "https",
|
|
MetricsPath: "/metrics",
|
|
JobName: "job",
|
|
ScrapeInterval: model.Duration(time.Second),
|
|
ScrapeTimeout: model.Duration(time.Second),
|
|
},
|
|
res: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "[::1]:443",
|
|
model.InstanceLabel: "custom-instance",
|
|
model.SchemeLabel: "https",
|
|
model.MetricsPathLabel: "/metrics",
|
|
model.JobLabel: "job",
|
|
model.ScrapeIntervalLabel: "1s",
|
|
model.ScrapeTimeoutLabel: "1s",
|
|
}),
|
|
resOrig: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "[::1]",
|
|
model.InstanceLabel: "custom-instance",
|
|
model.SchemeLabel: "https",
|
|
model.MetricsPathLabel: "/metrics",
|
|
model.JobLabel: "job",
|
|
model.ScrapeIntervalLabel: "1s",
|
|
model.ScrapeTimeoutLabel: "1s",
|
|
}),
|
|
},
|
|
// Address label missing.
|
|
{
|
|
in: labels.FromStrings("custom", "value"),
|
|
cfg: &config.ScrapeConfig{
|
|
Scheme: "https",
|
|
MetricsPath: "/metrics",
|
|
JobName: "job",
|
|
ScrapeInterval: model.Duration(time.Second),
|
|
ScrapeTimeout: model.Duration(time.Second),
|
|
},
|
|
res: labels.EmptyLabels(),
|
|
resOrig: labels.EmptyLabels(),
|
|
err: "no address",
|
|
},
|
|
// Address label missing, but added in relabelling.
|
|
{
|
|
in: labels.FromStrings("custom", "host:1234"),
|
|
cfg: &config.ScrapeConfig{
|
|
Scheme: "https",
|
|
MetricsPath: "/metrics",
|
|
JobName: "job",
|
|
ScrapeInterval: model.Duration(time.Second),
|
|
ScrapeTimeout: model.Duration(time.Second),
|
|
RelabelConfigs: []*relabel.Config{
|
|
{
|
|
Action: relabel.Replace,
|
|
Regex: relabel.MustNewRegexp("(.*)"),
|
|
SourceLabels: model.LabelNames{"custom"},
|
|
Replacement: "${1}",
|
|
TargetLabel: string(model.AddressLabel),
|
|
},
|
|
},
|
|
},
|
|
res: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "host:1234",
|
|
model.InstanceLabel: "host:1234",
|
|
model.SchemeLabel: "https",
|
|
model.MetricsPathLabel: "/metrics",
|
|
model.JobLabel: "job",
|
|
model.ScrapeIntervalLabel: "1s",
|
|
model.ScrapeTimeoutLabel: "1s",
|
|
"custom": "host:1234",
|
|
}),
|
|
resOrig: labels.FromMap(map[string]string{
|
|
model.SchemeLabel: "https",
|
|
model.MetricsPathLabel: "/metrics",
|
|
model.JobLabel: "job",
|
|
model.ScrapeIntervalLabel: "1s",
|
|
model.ScrapeTimeoutLabel: "1s",
|
|
"custom": "host:1234",
|
|
}),
|
|
},
|
|
// Address label missing, but added in relabelling.
|
|
{
|
|
in: labels.FromStrings("custom", "host:1234"),
|
|
cfg: &config.ScrapeConfig{
|
|
Scheme: "https",
|
|
MetricsPath: "/metrics",
|
|
JobName: "job",
|
|
ScrapeInterval: model.Duration(time.Second),
|
|
ScrapeTimeout: model.Duration(time.Second),
|
|
RelabelConfigs: []*relabel.Config{
|
|
{
|
|
Action: relabel.Replace,
|
|
Regex: relabel.MustNewRegexp("(.*)"),
|
|
SourceLabels: model.LabelNames{"custom"},
|
|
Replacement: "${1}",
|
|
TargetLabel: string(model.AddressLabel),
|
|
},
|
|
},
|
|
},
|
|
res: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "host:1234",
|
|
model.InstanceLabel: "host:1234",
|
|
model.SchemeLabel: "https",
|
|
model.MetricsPathLabel: "/metrics",
|
|
model.JobLabel: "job",
|
|
model.ScrapeIntervalLabel: "1s",
|
|
model.ScrapeTimeoutLabel: "1s",
|
|
"custom": "host:1234",
|
|
}),
|
|
resOrig: labels.FromMap(map[string]string{
|
|
model.SchemeLabel: "https",
|
|
model.MetricsPathLabel: "/metrics",
|
|
model.JobLabel: "job",
|
|
model.ScrapeIntervalLabel: "1s",
|
|
model.ScrapeTimeoutLabel: "1s",
|
|
"custom": "host:1234",
|
|
}),
|
|
},
|
|
// Invalid UTF-8 in label.
|
|
{
|
|
in: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "1.2.3.4:1000",
|
|
"custom": "\xbd",
|
|
}),
|
|
cfg: &config.ScrapeConfig{
|
|
Scheme: "https",
|
|
MetricsPath: "/metrics",
|
|
JobName: "job",
|
|
ScrapeInterval: model.Duration(time.Second),
|
|
ScrapeTimeout: model.Duration(time.Second),
|
|
},
|
|
res: labels.EmptyLabels(),
|
|
resOrig: labels.EmptyLabels(),
|
|
err: "invalid label value for \"custom\": \"\\xbd\"",
|
|
},
|
|
// Invalid duration in interval label.
|
|
{
|
|
in: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "1.2.3.4:1000",
|
|
model.ScrapeIntervalLabel: "2notseconds",
|
|
}),
|
|
cfg: &config.ScrapeConfig{
|
|
Scheme: "https",
|
|
MetricsPath: "/metrics",
|
|
JobName: "job",
|
|
ScrapeInterval: model.Duration(time.Second),
|
|
ScrapeTimeout: model.Duration(time.Second),
|
|
},
|
|
res: labels.EmptyLabels(),
|
|
resOrig: labels.EmptyLabels(),
|
|
err: "error parsing scrape interval: unknown unit \"notseconds\" in duration \"2notseconds\"",
|
|
},
|
|
// Invalid duration in timeout label.
|
|
{
|
|
in: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "1.2.3.4:1000",
|
|
model.ScrapeTimeoutLabel: "2notseconds",
|
|
}),
|
|
cfg: &config.ScrapeConfig{
|
|
Scheme: "https",
|
|
MetricsPath: "/metrics",
|
|
JobName: "job",
|
|
ScrapeInterval: model.Duration(time.Second),
|
|
ScrapeTimeout: model.Duration(time.Second),
|
|
},
|
|
res: labels.EmptyLabels(),
|
|
resOrig: labels.EmptyLabels(),
|
|
err: "error parsing scrape timeout: unknown unit \"notseconds\" in duration \"2notseconds\"",
|
|
},
|
|
// 0 interval in timeout label.
|
|
{
|
|
in: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "1.2.3.4:1000",
|
|
model.ScrapeIntervalLabel: "0s",
|
|
}),
|
|
cfg: &config.ScrapeConfig{
|
|
Scheme: "https",
|
|
MetricsPath: "/metrics",
|
|
JobName: "job",
|
|
ScrapeInterval: model.Duration(time.Second),
|
|
ScrapeTimeout: model.Duration(time.Second),
|
|
},
|
|
res: labels.EmptyLabels(),
|
|
resOrig: labels.EmptyLabels(),
|
|
err: "scrape interval cannot be 0",
|
|
},
|
|
// 0 duration in timeout label.
|
|
{
|
|
in: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "1.2.3.4:1000",
|
|
model.ScrapeTimeoutLabel: "0s",
|
|
}),
|
|
cfg: &config.ScrapeConfig{
|
|
Scheme: "https",
|
|
MetricsPath: "/metrics",
|
|
JobName: "job",
|
|
ScrapeInterval: model.Duration(time.Second),
|
|
ScrapeTimeout: model.Duration(time.Second),
|
|
},
|
|
res: labels.EmptyLabels(),
|
|
resOrig: labels.EmptyLabels(),
|
|
err: "scrape timeout cannot be 0",
|
|
},
|
|
// Timeout less than interval.
|
|
{
|
|
in: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "1.2.3.4:1000",
|
|
model.ScrapeIntervalLabel: "1s",
|
|
model.ScrapeTimeoutLabel: "2s",
|
|
}),
|
|
cfg: &config.ScrapeConfig{
|
|
Scheme: "https",
|
|
MetricsPath: "/metrics",
|
|
JobName: "job",
|
|
ScrapeInterval: model.Duration(time.Second),
|
|
ScrapeTimeout: model.Duration(time.Second),
|
|
},
|
|
res: labels.EmptyLabels(),
|
|
resOrig: labels.EmptyLabels(),
|
|
err: "scrape timeout cannot be greater than scrape interval (\"2s\" > \"1s\")",
|
|
},
|
|
// Don't attach default port.
|
|
{
|
|
in: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "1.2.3.4",
|
|
}),
|
|
cfg: &config.ScrapeConfig{
|
|
Scheme: "https",
|
|
MetricsPath: "/metrics",
|
|
JobName: "job",
|
|
ScrapeInterval: model.Duration(time.Second),
|
|
ScrapeTimeout: model.Duration(time.Second),
|
|
},
|
|
noDefaultPort: true,
|
|
res: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "1.2.3.4",
|
|
model.InstanceLabel: "1.2.3.4",
|
|
model.SchemeLabel: "https",
|
|
model.MetricsPathLabel: "/metrics",
|
|
model.JobLabel: "job",
|
|
model.ScrapeIntervalLabel: "1s",
|
|
model.ScrapeTimeoutLabel: "1s",
|
|
}),
|
|
resOrig: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "1.2.3.4",
|
|
model.SchemeLabel: "https",
|
|
model.MetricsPathLabel: "/metrics",
|
|
model.JobLabel: "job",
|
|
model.ScrapeIntervalLabel: "1s",
|
|
model.ScrapeTimeoutLabel: "1s",
|
|
}),
|
|
},
|
|
// Remove default port (http).
|
|
{
|
|
in: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "1.2.3.4:80",
|
|
}),
|
|
cfg: &config.ScrapeConfig{
|
|
Scheme: "http",
|
|
MetricsPath: "/metrics",
|
|
JobName: "job",
|
|
ScrapeInterval: model.Duration(time.Second),
|
|
ScrapeTimeout: model.Duration(time.Second),
|
|
},
|
|
noDefaultPort: true,
|
|
res: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "1.2.3.4",
|
|
model.InstanceLabel: "1.2.3.4:80",
|
|
model.SchemeLabel: "http",
|
|
model.MetricsPathLabel: "/metrics",
|
|
model.JobLabel: "job",
|
|
model.ScrapeIntervalLabel: "1s",
|
|
model.ScrapeTimeoutLabel: "1s",
|
|
}),
|
|
resOrig: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "1.2.3.4:80",
|
|
model.SchemeLabel: "http",
|
|
model.MetricsPathLabel: "/metrics",
|
|
model.JobLabel: "job",
|
|
model.ScrapeIntervalLabel: "1s",
|
|
model.ScrapeTimeoutLabel: "1s",
|
|
}),
|
|
},
|
|
// Remove default port (https).
|
|
{
|
|
in: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "1.2.3.4:443",
|
|
}),
|
|
cfg: &config.ScrapeConfig{
|
|
Scheme: "https",
|
|
MetricsPath: "/metrics",
|
|
JobName: "job",
|
|
ScrapeInterval: model.Duration(time.Second),
|
|
ScrapeTimeout: model.Duration(time.Second),
|
|
},
|
|
noDefaultPort: true,
|
|
res: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "1.2.3.4",
|
|
model.InstanceLabel: "1.2.3.4:443",
|
|
model.SchemeLabel: "https",
|
|
model.MetricsPathLabel: "/metrics",
|
|
model.JobLabel: "job",
|
|
model.ScrapeIntervalLabel: "1s",
|
|
model.ScrapeTimeoutLabel: "1s",
|
|
}),
|
|
resOrig: labels.FromMap(map[string]string{
|
|
model.AddressLabel: "1.2.3.4:443",
|
|
model.SchemeLabel: "https",
|
|
model.MetricsPathLabel: "/metrics",
|
|
model.JobLabel: "job",
|
|
model.ScrapeIntervalLabel: "1s",
|
|
model.ScrapeTimeoutLabel: "1s",
|
|
}),
|
|
},
|
|
}
|
|
for _, c := range cases {
|
|
in := c.in.Copy()
|
|
|
|
res, orig, err := PopulateLabels(labels.NewBuilder(c.in), c.cfg, c.noDefaultPort)
|
|
if c.err != "" {
|
|
require.EqualError(t, err, c.err)
|
|
} else {
|
|
require.NoError(t, err)
|
|
}
|
|
require.Equal(t, c.in, in)
|
|
require.Equal(t, c.res, res)
|
|
require.Equal(t, c.resOrig, orig)
|
|
}
|
|
}
|
|
|
|
func loadConfiguration(t testing.TB, c string) *config.Config {
|
|
t.Helper()
|
|
|
|
cfg := &config.Config{}
|
|
if err := yaml.UnmarshalStrict([]byte(c), cfg); err != nil {
|
|
t.Fatalf("Unable to load YAML config: %s", err)
|
|
}
|
|
return cfg
|
|
}
|
|
|
|
func noopLoop() loop {
|
|
return &testLoop{
|
|
startFunc: func(interval, timeout time.Duration, errc chan<- error) {},
|
|
stopFunc: func() {},
|
|
}
|
|
}
|
|
|
|
// TestManagerApplyConfig verifies that ApplyConfig reloads scrape pools only
// when a valid configuration that differs from the current one is applied:
// re-applying the same config, or applying an invalid one, must not trigger
// a reload.
func TestManagerApplyConfig(t *testing.T) {
	// Valid initial configuration.
	cfgText1 := `
scrape_configs:
 - job_name: job1
   static_configs:
   - targets: ["foo:9090"]
`
	// Invalid configuration.
	cfgText2 := `
scrape_configs:
 - job_name: job1
   scheme: https
   static_configs:
   - targets: ["foo:9090"]
   tls_config:
     ca_file: /not/existing/ca/file
`
	// Valid configuration.
	cfgText3 := `
scrape_configs:
 - job_name: job1
   scheme: https
   static_configs:
   - targets: ["foo:9090"]
`
	var (
		cfg1 = loadConfiguration(t, cfgText1)
		cfg2 = loadConfiguration(t, cfgText2)
		cfg3 = loadConfiguration(t, cfgText3)

		// ch receives a token whenever a new scrape loop is created,
		// i.e. whenever a reload actually happened. Buffered so the
		// loop constructor never blocks.
		ch = make(chan struct{}, 1)
	)

	opts := Options{}
	scrapeManager := NewManager(&opts, nil, nil)
	// newLoop signals ch so the test can observe reloads.
	newLoop := func(scrapeLoopOptions) loop {
		ch <- struct{}{}
		return noopLoop()
	}
	// Pre-populate the manager with a pool for job1 running cfg1, with one
	// active target/loop, so ApplyConfig has existing state to reconcile.
	sp := &scrapePool{
		appendable: &nopAppendable{},
		activeTargets: map[uint64]*Target{
			1: {},
		},
		loops: map[uint64]loop{
			1: noopLoop(),
		},
		newLoop: newLoop,
		logger:  nil,
		config:  cfg1.ScrapeConfigs[0],
		client:  http.DefaultClient,
		opts:    &opts,
	}
	scrapeManager.scrapePools = map[string]*scrapePool{
		"job1": sp,
	}

	// Apply the initial configuration.
	if err := scrapeManager.ApplyConfig(cfg1); err != nil {
		t.Fatalf("unable to apply configuration: %s", err)
	}
	// The pool already runs cfg1, so no loop may have been recreated.
	select {
	case <-ch:
		t.Fatal("reload happened")
	default:
	}

	// Apply a configuration for which the reload fails.
	if err := scrapeManager.ApplyConfig(cfg2); err == nil {
		t.Fatalf("expecting error but got none")
	}
	// A failed apply must not have touched the running loops.
	select {
	case <-ch:
		t.Fatal("reload happened")
	default:
	}

	// Apply a configuration for which the reload succeeds.
	if err := scrapeManager.ApplyConfig(cfg3); err != nil {
		t.Fatalf("unable to apply configuration: %s", err)
	}
	// cfg3 differs from cfg1 (scheme), so exactly this apply must reload.
	select {
	case <-ch:
	default:
		t.Fatal("reload didn't happen")
	}

	// Re-applying the same configuration shouldn't trigger a reload.
	if err := scrapeManager.ApplyConfig(cfg3); err != nil {
		t.Fatalf("unable to apply configuration: %s", err)
	}
	select {
	case <-ch:
		t.Fatal("reload happened")
	default:
	}
}
|
|
|
|
func TestManagerTargetsUpdates(t *testing.T) {
|
|
opts := Options{}
|
|
m := NewManager(&opts, nil, nil)
|
|
|
|
ts := make(chan map[string][]*targetgroup.Group)
|
|
go m.Run(ts)
|
|
defer m.Stop()
|
|
|
|
tgSent := make(map[string][]*targetgroup.Group)
|
|
for x := 0; x < 10; x++ {
|
|
|
|
tgSent[strconv.Itoa(x)] = []*targetgroup.Group{
|
|
{
|
|
Source: strconv.Itoa(x),
|
|
},
|
|
}
|
|
|
|
select {
|
|
case ts <- tgSent:
|
|
case <-time.After(10 * time.Millisecond):
|
|
t.Error("Scrape manager's channel remained blocked after the set threshold.")
|
|
}
|
|
}
|
|
|
|
m.mtxScrape.Lock()
|
|
tsetActual := m.targetSets
|
|
m.mtxScrape.Unlock()
|
|
|
|
// Make sure all updates have been received.
|
|
require.Equal(t, tgSent, tsetActual)
|
|
|
|
select {
|
|
case <-m.triggerReload:
|
|
default:
|
|
t.Error("No scrape loops reload was triggered after targets update.")
|
|
}
|
|
}
|
|
|
|
// TestSetOffsetSeed checks that the manager's scrape offset seed is derived
// from the external labels: it must be non-zero, and different external
// label sets must produce different seeds.
func TestSetOffsetSeed(t *testing.T) {
	// getConfig builds a config whose only content is a `prometheus`
	// external label with the given value.
	getConfig := func(prometheus string) *config.Config {
		cfgText := `
global:
 external_labels:
   prometheus: '` + prometheus + `'
`

		cfg := &config.Config{}
		if err := yaml.UnmarshalStrict([]byte(cfgText), cfg); err != nil {
			t.Fatalf("Unable to load YAML config cfgYaml: %s", err)
		}

		return cfg
	}

	opts := Options{}
	scrapeManager := NewManager(&opts, nil, nil)

	// Load the first config.
	cfg1 := getConfig("ha1")
	if err := scrapeManager.setOffsetSeed(cfg1.GlobalConfig.ExternalLabels); err != nil {
		t.Error(err)
	}
	offsetSeed1 := scrapeManager.offsetSeed

	if offsetSeed1 == 0 {
		t.Error("Offset seed has to be a hash of uint64")
	}

	// Load the second config with different external labels.
	cfg2 := getConfig("ha2")
	if err := scrapeManager.setOffsetSeed(cfg2.GlobalConfig.ExternalLabels); err != nil {
		t.Error(err)
	}
	offsetSeed2 := scrapeManager.offsetSeed

	if offsetSeed1 == offsetSeed2 {
		t.Error("Offset seed should not be the same on different set of external labels")
	}
}
|
|
|
|
// TestManagerScrapePools checks that ScrapePools reports exactly the job
// names of the currently configured scrape pools, across a config change
// that adds and removes jobs.
func TestManagerScrapePools(t *testing.T) {
	cfgText1 := `
scrape_configs:
- job_name: job1
  static_configs:
  - targets: ["foo:9090"]
- job_name: job2
  static_configs:
  - targets: ["foo:9091", "foo:9092"]
`
	cfgText2 := `
scrape_configs:
- job_name: job1
  static_configs:
  - targets: ["foo:9090", "foo:9094"]
- job_name: job3
  static_configs:
  - targets: ["foo:9093"]
`
	var (
		cfg1 = loadConfiguration(t, cfgText1)
		cfg2 = loadConfiguration(t, cfgText2)
	)

	// reload replaces the manager's scrape pools with fresh ones built
	// directly from cfg, bypassing ApplyConfig. One noop loop per pool;
	// one fake active target per static target in the config.
	reload := func(scrapeManager *Manager, cfg *config.Config) {
		newLoop := func(scrapeLoopOptions) loop {
			return noopLoop()
		}
		scrapeManager.scrapePools = map[string]*scrapePool{}
		for _, sc := range cfg.ScrapeConfigs {
			// The context itself is discarded; only the cancel func is
			// stored on the pool. NOTE(review): these defers accumulate
			// until the test function returns — acceptable here since
			// the loop runs a handful of times.
			_, cancel := context.WithCancel(context.Background())
			defer cancel()
			sp := &scrapePool{
				appendable:    &nopAppendable{},
				activeTargets: map[uint64]*Target{},
				loops: map[uint64]loop{
					1: noopLoop(),
				},
				newLoop: newLoop,
				logger:  nil,
				config:  sc,
				client:  http.DefaultClient,
				cancel:  cancel,
			}
			for _, c := range sc.ServiceDiscoveryConfigs {
				staticConfig := c.(discovery.StaticConfig)
				for _, group := range staticConfig {
					for i := range group.Targets {
						sp.activeTargets[uint64(i)] = &Target{}
					}
				}
			}
			scrapeManager.scrapePools[sc.JobName] = sp
		}
	}

	scrapeManager := NewManager(&Options{}, nil, nil)

	reload(scrapeManager, cfg1)
	require.ElementsMatch(t, []string{"job1", "job2"}, scrapeManager.ScrapePools())

	// After the second reload job2 is gone and job3 appears.
	reload(scrapeManager, cfg2)
	require.ElementsMatch(t, []string{"job1", "job3"}, scrapeManager.ScrapePools())
}
|
|
|
|
func TestManagerStopAfterScrapeAttempt(t *testing.T) {
|
|
for _, tcase := range []struct {
|
|
name string
|
|
noJitter bool
|
|
stop func(m *Manager)
|
|
expectedSamples int
|
|
}{
|
|
{
|
|
name: "no scrape stop, no jitter",
|
|
noJitter: true,
|
|
stop: func(m *Manager) { m.Stop() },
|
|
expectedSamples: 1,
|
|
},
|
|
{
|
|
name: "no scrape on stop, with jitter",
|
|
stop: func(m *Manager) { m.Stop() },
|
|
expectedSamples: 0,
|
|
},
|
|
{
|
|
name: "scrape on stop, no jitter",
|
|
noJitter: true,
|
|
stop: func(m *Manager) { m.StopAfterScrapeAttempt(time.Now()) },
|
|
expectedSamples: 2,
|
|
},
|
|
{
|
|
name: "scrape on stop, but initial sample is fresh enough, no jitter",
|
|
noJitter: true,
|
|
stop: func(m *Manager) { m.StopAfterScrapeAttempt(time.Now().Add(-1 * time.Hour)) },
|
|
expectedSamples: 1,
|
|
},
|
|
{
|
|
name: "scrape on stop, with jitter",
|
|
stop: func(m *Manager) { m.StopAfterScrapeAttempt(time.Now()) },
|
|
expectedSamples: 1,
|
|
},
|
|
} {
|
|
t.Run(tcase.name, func(t *testing.T) {
|
|
app := &collectResultAppender{}
|
|
|
|
// Setup scrape manager.
|
|
scrapeManager := NewManager(&Options{
|
|
IgnoreJitter: tcase.noJitter,
|
|
|
|
// Extremely high value to turn it off. We don't want to wait minimum 5s, so
|
|
// we reload manually.
|
|
// TODO(bwplotka): Make scrape manager more testable.
|
|
DiscoveryReloadInterval: model.Duration(99 * time.Hour),
|
|
}, log.NewLogfmtLogger(os.Stderr), &collectResultAppendable{app})
|
|
|
|
require.NoError(t, scrapeManager.ApplyConfig(&config.Config{
|
|
GlobalConfig: config.GlobalConfig{
|
|
// Extremely high scrape interval, to ensure the only chance to see the
|
|
// sample is on start and stopAfterScrapeAttempt.
|
|
ScrapeInterval: model.Duration(99 * time.Hour),
|
|
ScrapeTimeout: model.Duration(10 * time.Second),
|
|
},
|
|
ScrapeConfigs: []*config.ScrapeConfig{{JobName: "test"}},
|
|
}))
|
|
|
|
// Start fake HTTP target to scrape returning a single metric.
|
|
server := httptest.NewServer(
|
|
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
w.Header().Set("Content-Type", `text/plain; version=0.0.4`)
|
|
w.Write([]byte("expected_metric 1\n"))
|
|
}),
|
|
)
|
|
defer server.Close()
|
|
|
|
serverURL, err := url.Parse(server.URL)
|
|
require.NoError(t, err)
|
|
|
|
// Add fake target directly into tsets + reload. Normally users would use
|
|
// Manager.Run and wait for minimum 5s refresh interval.
|
|
scrapeManager.updateTsets(map[string][]*targetgroup.Group{
|
|
"test": {
|
|
{
|
|
Targets: []model.LabelSet{{
|
|
model.SchemeLabel: model.LabelValue(serverURL.Scheme),
|
|
model.AddressLabel: model.LabelValue(serverURL.Host),
|
|
}},
|
|
},
|
|
},
|
|
})
|
|
scrapeManager.reload()
|
|
|
|
// At this point the first sample is scheduled to be scraped after the initial
|
|
// jitter in the background scrape loop go-routine
|
|
//
|
|
// With jitter the first sample will appear after long time,
|
|
// given the extremely long scrape interval configured. We stop right
|
|
// away and expect only the last sample due to stop.
|
|
//
|
|
// With no jitter setting, we expect the first to be added straight away--wait
|
|
// for it, before stopping.
|
|
if tcase.noJitter {
|
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
|
defer cancel()
|
|
require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error {
|
|
if countFloatSamples(app, "expected_metric") < 1 {
|
|
return errors.New("expected more then one expected_metric sample")
|
|
}
|
|
return nil
|
|
}), "after 5 seconds")
|
|
}
|
|
|
|
tcase.stop(scrapeManager)
|
|
|
|
require.Equal(t, tcase.expectedSamples, countFloatSamples(app, "expected_metric"))
|
|
})
|
|
}
|
|
}
|
|
|
|
func countFloatSamples(a *collectResultAppender, expectedMetricName string) (count int) {
|
|
a.mtx.Lock()
|
|
defer a.mtx.Unlock()
|
|
|
|
for _, f := range a.resultFloats {
|
|
if f.metric.Get(model.MetricNameLabel) == expectedMetricName {
|
|
count++
|
|
}
|
|
}
|
|
return count
|
|
}
|