Merge pull request #644 from fabxc/fabxc/sdprep
commit 9042b4f081

@@ -1,10 +1,10 @@
 global <
   scrape_interval: "30s"
   evaluation_interval: "30s"
   labels: <
     label: <
       name: "monitor-test"
       value: "test"
     >
   >
 >

@@ -1,11 +1,11 @@
 global <
   scrape_interval: "30s"
   evaluation_interval: "30s"
   unknown_field: "foo"
   labels: <
     label: <
       name: "monitor"
       value: "test"
     >
   >
 >

@@ -1,10 +1,10 @@
 global <
   scrape_interval: "30"
   evaluation_interval: "30s"
   labels: <
     label: <
       name: "monitor"
       value: "test"
     >
   >
 >

@@ -1,20 +1,20 @@
 global <
   scrape_interval: "30s"
   evaluation_interval: "30s"
   labels: <
     label: <
       name: "monitor"
       value: "test"
     >
   >
   rule_file: "prometheus.rules"
 >

 job: <
   name: "prometheus"
   scrape_interval: "15s"

   target_group: <
     target: "http://localhost:9090/metrics.json"
   >
 >

@@ -1,55 +1,55 @@
 global <
   scrape_interval: "30s"
   evaluation_interval: "30s"
   labels: <
     label: <
       name: "monitor"
       value: "test"
     >
   >
   rule_file: "prometheus.rules"
 >

 job: <
   name: "prometheus"
   scrape_interval: "15s"

   target_group: <
     target: "http://localhost:9090/metrics.json"
     labels: <
       label: <
         name: "group"
         value: "canary"
       >
     >
   >
 >

 job: <
   name: "random"
   scrape_interval: "30s"

   target_group: <
     target: "http://random.com:8080/metrics.json"
     target: "http://random.com:8081/metrics.json"
     target: "http://random.com:8082/metrics.json"
     target: "http://random.com:8083/metrics.json"
     target: "http://random.com:8084/metrics.json"
     labels: <
       label: <
         name: "group"
         value: "production"
       >
     >
   >
   target_group: <
     target: "http://random.com:8085/metrics.json"
     target: "http://random.com:8086/metrics.json"
     labels: <
       label: <
         name: "group"
         value: "canary"
       >
     >
   >
 >

@@ -109,6 +109,8 @@ const (
 //
 // Target implements extraction.Ingester.
 type Target interface {
+	extraction.Ingester
+
 	// Return the last encountered scrape error, if any.
 	LastError() error
 	// Return the health of the target.
@@ -129,18 +131,13 @@ type Target interface {
 	// Return the target's base labels without job and instance label. That's
 	// useful for display purposes.
 	BaseLabelsWithoutJobAndInstance() clientmodel.LabelSet
-	// SetBaseLabelsFrom queues a replacement of the current base labels by
-	// the labels of the given target. The method returns immediately after
-	// queuing. The actual replacement of the base labels happens
-	// asynchronously (but most likely before the next scrape for the target
-	// begins).
+	// SetBaseLabelsFrom sets the target's base labels to the base labels
+	// of the provided target.
 	SetBaseLabelsFrom(Target)
 	// Scrape target at the specified interval.
 	RunScraper(storage.SampleAppender, time.Duration)
 	// Stop scraping, synchronous.
 	StopScraper()
-	// Ingest implements extraction.Ingester.
-	Ingest(clientmodel.Samples) error
 }

 // target is a Target that refers to a singular HTTP or HTTPS endpoint.
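Note on the interface change above: dropping the explicit Ingest declaration works because the embedded extraction.Ingester (added in the earlier hunk) already contributes that method to the Target interface. Below is a minimal standalone sketch of Go interface embedding; the one-method Ingester shape and all names are illustrative assumptions, not the actual extraction package API.

    package main

    import "fmt"

    // Ingester stands in for extraction.Ingester; its single-method shape
    // here is an assumption for illustration only.
    type Ingester interface {
    	Ingest(samples []float64) error
    }

    // Target embeds Ingester, so every Target implementation must also
    // satisfy Ingester, and a Target can be passed wherever an Ingester
    // is expected — no separate Ingest declaration is needed.
    type Target interface {
    	Ingester
    	LastError() error
    }

    type fakeTarget struct{ err error }

    func (t *fakeTarget) Ingest(samples []float64) error { return nil }
    func (t *fakeTarget) LastError() error               { return t.err }

    func main() {
    	var i Ingester = &fakeTarget{} // a Target is usable as an Ingester
    	fmt.Println(i.Ingest(nil))
    }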
@@ -155,8 +152,6 @@ type target struct {
 	scraperStopping chan struct{}
 	// Closing scraperStopped signals that scraping has been stopped.
 	scraperStopped chan struct{}
-	// Channel to queue base labels to be replaced.
-	newBaseLabels chan clientmodel.LabelSet
 	// Channel to buffer ingested samples.
 	ingestedSamples chan clientmodel.Samples

@@ -168,12 +163,8 @@ type target struct {
 	// The HTTP client used to scrape the target's endpoint.
 	httpClient *http.Client

-	// Mutex protects lastError, lastScrape, state, and baseLabels. Writing
-	// the above must only happen in the goroutine running the RunScraper
-	// loop, and it must happen under the lock. In that way, no mutex lock
-	// is required for reading the above in the goroutine running the
-	// RunScraper loop, but only for reading in other goroutines.
-	sync.Mutex
+	// Mutex protects lastError, lastScrape, state, and baseLabels.
+	sync.RWMutex
 }

 // NewTarget creates a reasonably configured target for querying.
@@ -184,7 +175,6 @@ func NewTarget(url string, deadline time.Duration, baseLabels clientmodel.LabelS
 		httpClient:      utility.NewDeadlineClient(deadline),
 		scraperStopping: make(chan struct{}),
 		scraperStopped:  make(chan struct{}),
-		newBaseLabels:   make(chan clientmodel.LabelSet, 1),
 	}
 	t.baseLabels = clientmodel.LabelSet{InstanceLabel: clientmodel.LabelValue(t.InstanceIdentifier())}
 	for baseLabel, baseValue := range baseLabels {
@@ -213,18 +203,7 @@ func (t *target) Ingest(s clientmodel.Samples) error {

 // RunScraper implements Target.
 func (t *target) RunScraper(sampleAppender storage.SampleAppender, interval time.Duration) {
-	defer func() {
-		// Need to drain t.newBaseLabels to not make senders block during shutdown.
-		for {
-			select {
-			case <-t.newBaseLabels:
-				// Do nothing.
-			default:
-				close(t.scraperStopped)
-				return
-			}
-		}
-	}()
+	defer close(t.scraperStopped)

 	jitterTimer := time.NewTimer(time.Duration(float64(interval) * rand.Float64()))
 	select {
@@ -245,31 +224,22 @@ func (t *target) RunScraper(sampleAppender storage.SampleAppender, interval time

 	// Explanation of the contraption below:
 	//
-	// In case t.newBaseLabels or t.scraperStopping have something to receive,
-	// we want to read from those channels rather than starting a new scrape
-	// (which might take very long). That's why the outer select has no
-	// ticker.C. Should neither t.newBaseLabels nor t.scraperStopping have
-	// anything to receive, we go into the inner select, where ticker.C is
-	// in the mix.
+	// In case t.scraperStopping has something to receive, we want to read
+	// from that channel rather than starting a new scrape (which might take very
+	// long). That's why the outer select has no ticker.C. Should t.scraperStopping
+	// not have anything to receive, we go into the inner select, where ticker.C
+	// is in the mix.
 	for {
 		select {
-		case newBaseLabels := <-t.newBaseLabels:
-			t.Lock() // Writing t.baseLabels requires the lock.
-			t.baseLabels = newBaseLabels
-			t.Unlock()
 		case <-t.scraperStopping:
 			return
 		default:
 			select {
-			case newBaseLabels := <-t.newBaseLabels:
-				t.Lock() // Writing t.baseLabels requires the lock.
-				t.baseLabels = newBaseLabels
-				t.Unlock()
 			case <-t.scraperStopping:
 				return
 			case <-ticker.C:
-				took := time.Since(t.lastScrape)
 				t.Lock() // Write t.lastScrape requires locking.
+				took := time.Since(t.lastScrape)
 				t.lastScrape = time.Now()
 				t.Unlock()
 				targetIntervalLength.WithLabelValues(interval.String()).Observe(
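The "contraption" comment in the hunk above describes a standard Go prioritization idiom: the outer select omits ticker.C, so a pending stop signal always wins over starting another (potentially long) scrape; only when nothing is pending does the inner select put the ticker in the mix. A self-contained sketch of the pattern, with hypothetical channel names standing in for the scraper's:

    package main

    import (
    	"fmt"
    	"time"
    )

    func run(stop <-chan struct{}, tick <-chan time.Time) {
    	for {
    		// Outer select: no tick case, so a pending stop always wins
    		// over starting new work.
    		select {
    		case <-stop:
    			return
    		default:
    			// Inner select: now the ticker is in the mix.
    			select {
    			case <-stop:
    				return
    			case <-tick:
    				fmt.Println("scrape") // stand-in for the actual work
    			}
    		}
    	}
    }

    func main() {
    	stop := make(chan struct{})
    	ticker := time.NewTicker(10 * time.Millisecond)
    	defer ticker.Stop()
    	go func() { time.Sleep(50 * time.Millisecond); close(stop) }()
    	run(stop, ticker.C)
    }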
@@ -290,8 +260,13 @@ func (t *target) StopScraper() {
 const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,application/json;schema="prometheus/telemetry";version=0.0.2;q=0.2,*/*;q=0.1`

 func (t *target) scrape(sampleAppender storage.SampleAppender) (err error) {
+	t.RLock()
 	timestamp := clientmodel.Now()

 	defer func(start time.Time) {
+		t.recordScrapeHealth(sampleAppender, timestamp, err == nil, time.Since(start))
+		t.RUnlock()
+
 		t.Lock() // Writing t.state and t.lastError requires the lock.
 		if err == nil {
 			t.state = Healthy
@@ -300,7 +275,6 @@ func (t *target) scrape(sampleAppender storage.SampleAppender) (err error) {
 		}
 		t.lastError = err
 		t.Unlock()
-		t.recordScrapeHealth(sampleAppender, timestamp, err == nil, time.Since(start))
 	}(time.Now())

 	req, err := http.NewRequest("GET", t.URL(), nil)
@@ -344,22 +318,22 @@ func (t *target) scrape(sampleAppender storage.SampleAppender) (err error) {

 // LastError implements Target.
 func (t *target) LastError() error {
-	t.Lock()
-	defer t.Unlock()
+	t.RLock()
+	defer t.RUnlock()
 	return t.lastError
 }

 // State implements Target.
 func (t *target) State() TargetState {
-	t.Lock()
-	defer t.Unlock()
+	t.RLock()
+	defer t.RUnlock()
 	return t.state
 }

 // LastScrape implements Target.
 func (t *target) LastScrape() time.Time {
-	t.Lock()
-	defer t.Unlock()
+	t.RLock()
+	defer t.RUnlock()
 	return t.lastScrape
 }

@@ -406,8 +380,8 @@ func (t *target) GlobalURL() string {

 // BaseLabels implements Target.
 func (t *target) BaseLabels() clientmodel.LabelSet {
-	t.Lock()
-	defer t.Unlock()
+	t.RLock()
+	defer t.RUnlock()
 	return t.baseLabels
 }

@@ -427,7 +401,9 @@ func (t *target) SetBaseLabelsFrom(newTarget Target) {
 	if t.URL() != newTarget.URL() {
 		panic("targets don't refer to the same endpoint")
 	}
-	t.newBaseLabels <- newTarget.BaseLabels()
+	t.Lock()
+	defer t.Unlock()
+	t.baseLabels = newTarget.BaseLabels()
 }

 func (t *target) recordScrapeHealth(sampleAppender storage.SampleAppender, timestamp clientmodel.Timestamp, healthy bool, scrapeDuration time.Duration) {

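Taken together, the last few hunks replace the queued, asynchronous base-label replacement with a synchronous write under the new RWMutex, while readers such as LastError, State, and BaseLabels now take only the read lock. A minimal sketch of that reader/writer discipline, using simplified, illustrative types rather than the actual ones:

    package main

    import (
    	"fmt"
    	"sync"
    )

    type LabelSet map[string]string

    type target struct {
    	sync.RWMutex // protects baseLabels (and, in the diff, state etc.)
    	baseLabels   LabelSet
    }

    // setBaseLabels replaces the labels synchronously under the write lock,
    // mirroring the new SetBaseLabelsFrom behavior.
    func (t *target) setBaseLabels(ls LabelSet) {
    	t.Lock()
    	defer t.Unlock()
    	t.baseLabels = ls
    }

    // baseLabels is read under the read lock, so concurrent readers
    // do not block one another.
    func (t *target) baseLabelsCopy() LabelSet {
    	t.RLock()
    	defer t.RUnlock()
    	return t.baseLabels
    }

    func main() {
    	t := &target{baseLabels: LabelSet{"instance": "localhost:9090"}}
    	t.setBaseLabels(LabelSet{"monitor": "test"})
    	fmt.Println(t.baseLabelsCopy())
    }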
@@ -17,8 +17,6 @@ import (
 	"net/http"
 	"testing"
 	"time"
-
-	clientmodel "github.com/prometheus/client_golang/model"
 )

 func testTargetPool(t testing.TB) {
@@ -84,9 +82,8 @@ func testTargetPool(t testing.TB) {

 	for _, input := range scenario.inputs {
 		target := target{
 			url:        input.url,
-			newBaseLabels: make(chan clientmodel.LabelSet, 1),
-			httpClient:    &http.Client{},
+			httpClient: &http.Client{},
 		}
 		pool.addTarget(&target)
 	}
@@ -118,7 +115,6 @@ func TestTargetPoolReplaceTargets(t *testing.T) {
 		state:           Unhealthy,
 		scraperStopping: make(chan struct{}),
 		scraperStopped:  make(chan struct{}),
-		newBaseLabels:   make(chan clientmodel.LabelSet, 1),
 		httpClient:      &http.Client{},
 	}
 	oldTarget2 := &target{
@@ -126,7 +122,6 @@ func TestTargetPoolReplaceTargets(t *testing.T) {
 		state:           Unhealthy,
 		scraperStopping: make(chan struct{}),
 		scraperStopped:  make(chan struct{}),
-		newBaseLabels:   make(chan clientmodel.LabelSet, 1),
 		httpClient:      &http.Client{},
 	}
 	newTarget1 := &target{
@@ -134,7 +129,6 @@ func TestTargetPoolReplaceTargets(t *testing.T) {
 		state:           Healthy,
 		scraperStopping: make(chan struct{}),
 		scraperStopped:  make(chan struct{}),
-		newBaseLabels:   make(chan clientmodel.LabelSet, 1),
 		httpClient:      &http.Client{},
 	}
 	newTarget2 := &target{
@@ -142,7 +136,6 @@ func TestTargetPoolReplaceTargets(t *testing.T) {
 		state:           Healthy,
 		scraperStopping: make(chan struct{}),
 		scraperStopped:  make(chan struct{}),
-		newBaseLabels:   make(chan clientmodel.LabelSet, 1),
 		httpClient:      &http.Client{},
 	}
