// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package scrape

import (
	"errors"
	"fmt"
	"hash/fnv"
	"net"
	"net/url"
	"strings"
	"sync"
	"time"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"
	"github.com/prometheus/prometheus/model/value"
	"github.com/prometheus/prometheus/storage"
)

// TargetHealth describes the health state of a target.
type TargetHealth string

// The possible health states of a target based on the last performed scrape.
const (
	HealthUnknown TargetHealth = "unknown"
	HealthGood    TargetHealth = "up"
	HealthBad     TargetHealth = "down"
)

// Target refers to a singular HTTP or HTTPS endpoint.
type Target struct {
	// Labels before any processing.
	discoveredLabels labels.Labels
	// Any labels that are added to this target and its metrics.
	labels labels.Labels
	// Additional URL parameters that are part of the target URL.
	params url.Values

	mtx                sync.RWMutex
	lastError          error
	lastScrape         time.Time
	lastScrapeDuration time.Duration
	health             TargetHealth
	metadata           MetricMetadataStore
}

// NewTarget creates a reasonably configured target for querying.
func NewTarget(labels, discoveredLabels labels.Labels, params url.Values) *Target {
	return &Target{
		labels:           labels,
		discoveredLabels: discoveredLabels,
		params:           params,
		health:           HealthUnknown,
	}
}

func (t *Target) String() string {
	return t.URL().String()
}

// MetricMetadataStore represents a store of metric metadata.
type MetricMetadataStore interface {
	ListMetadata() []MetricMetadata
	GetMetadata(metric string) (MetricMetadata, bool)
	SizeMetadata() int
	LengthMetadata() int
}

// MetricMetadata is a piece of metadata for a metric.
type MetricMetadata struct {
	Metric string
	Type   model.MetricType
	Help   string
	Unit   string
}

// ListMetadata returns all metadata known for the target's metrics.
func (t *Target) ListMetadata() []MetricMetadata {
	t.mtx.RLock()
	defer t.mtx.RUnlock()

	if t.metadata == nil {
		return nil
	}
	return t.metadata.ListMetadata()
}

// SizeMetadata returns the size of the metadata cached for the target.
func (t *Target) SizeMetadata() int {
	t.mtx.RLock()
	defer t.mtx.RUnlock()

	if t.metadata == nil {
		return 0
	}
	return t.metadata.SizeMetadata()
}

// LengthMetadata returns the number of metadata entries cached for the target.
func (t *Target) LengthMetadata() int {
	t.mtx.RLock()
	defer t.mtx.RUnlock()

	if t.metadata == nil {
		return 0
	}
	return t.metadata.LengthMetadata()
}

// GetMetadata returns type and help metadata for the given metric.
func (t *Target) GetMetadata(metric string) (MetricMetadata, bool) {
	t.mtx.RLock()
	defer t.mtx.RUnlock()

	if t.metadata == nil {
		return MetricMetadata{}, false
	}
	return t.metadata.GetMetadata(metric)
}

// SetMetadataStore sets the store the target uses to look up metric metadata.
func (t *Target) SetMetadataStore(s MetricMetadataStore) {
	t.mtx.Lock()
	defer t.mtx.Unlock()
	t.metadata = s
}

// hash returns an identifying hash for the target.
func (t *Target) hash() uint64 {
	h := fnv.New64a()
	h.Write([]byte(fmt.Sprintf("%016d", t.labels.Hash())))
	h.Write([]byte(t.URL().String()))

	return h.Sum64()
}

// offset returns the time until the next scrape cycle for the target.
// It includes the global offsetSeed so that scrapes from multiple Prometheus
// servers are spread out in time.
func (t *Target) offset(interval time.Duration, offsetSeed uint64) time.Duration {
	now := time.Now().UnixNano()

	// The base is pinned to absolute time, no matter how often offset is called.
	var (
		base   = int64(interval) - now%int64(interval)
		offset = (t.hash() ^ offsetSeed) % uint64(interval)
		next   = base + int64(offset)
	)

	if next > int64(interval) {
		next -= int64(interval)
	}
	return time.Duration(next)
}
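
// exampleScrapeOffsets is an illustrative sketch, not used by the scrape loop:
// it shows how the offset arithmetic phases targets apart. Two targets with
// different label sets hash to different offsets within the same interval, so
// their scrapes are spread out rather than aligned. The addresses are made up.
func exampleScrapeOffsets() {
	interval := time.Minute
	a := NewTarget(labels.FromStrings(model.AddressLabel, "10.0.0.1:9100"), labels.EmptyLabels(), nil)
	b := NewTarget(labels.FromStrings(model.AddressLabel, "10.0.0.2:9100"), labels.EmptyLabels(), nil)

	// With the same offsetSeed, each target lands on its own stable phase
	// within [0, interval).
	fmt.Println(a.offset(interval, 0), b.offset(interval, 0))
}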

// Labels returns a copy of the set of all public labels of the target.
func (t *Target) Labels(b *labels.ScratchBuilder) labels.Labels {
	b.Reset()
	t.labels.Range(func(l labels.Label) {
		if !strings.HasPrefix(l.Name, model.ReservedLabelPrefix) {
			b.Add(l.Name, l.Value)
		}
	})
	return b.Labels()
}

// LabelsRange calls f on each public label of the target.
func (t *Target) LabelsRange(f func(l labels.Label)) {
	t.labels.Range(func(l labels.Label) {
		if !strings.HasPrefix(l.Name, model.ReservedLabelPrefix) {
			f(l)
		}
	})
}

// DiscoveredLabels returns a copy of the target's labels before any processing.
func (t *Target) DiscoveredLabels() labels.Labels {
	t.mtx.Lock()
	defer t.mtx.Unlock()
	return t.discoveredLabels.Copy()
}

// SetDiscoveredLabels replaces the target's discovered label set.
func (t *Target) SetDiscoveredLabels(l labels.Labels) {
	t.mtx.Lock()
	defer t.mtx.Unlock()
	t.discoveredLabels = l
}

// URL returns a copy of the target's URL.
func (t *Target) URL() *url.URL {
	params := url.Values{}

	for k, v := range t.params {
		params[k] = make([]string, len(v))
		copy(params[k], v)
	}
	t.labels.Range(func(l labels.Label) {
		if !strings.HasPrefix(l.Name, model.ParamLabelPrefix) {
			return
		}
		ks := l.Name[len(model.ParamLabelPrefix):]

		if len(params[ks]) > 0 {
			params[ks][0] = l.Value
		} else {
			params[ks] = []string{l.Value}
		}
	})

	return &url.URL{
		Scheme:   t.labels.Get(model.SchemeLabel),
		Host:     t.labels.Get(model.AddressLabel),
		Path:     t.labels.Get(model.MetricsPathLabel),
		RawQuery: params.Encode(),
	}
}
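
// exampleTargetURL is an illustrative sketch with made-up values: a
// __param_-prefixed label overrides the first value of the matching configured
// query parameter, so the target below scrapes
// https://example.com:443/probe?module=http_2xx&timeout=5s.
func exampleTargetURL() string {
	t := NewTarget(
		labels.FromStrings(
			model.SchemeLabel, "https",
			model.AddressLabel, "example.com:443",
			model.MetricsPathLabel, "/probe",
			model.ParamLabelPrefix+"module", "http_2xx",
		),
		labels.EmptyLabels(),
		url.Values{"module": {"icmp"}, "timeout": {"5s"}},
	)
	return t.URL().String()
}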

// Report sets target data about the last scrape.
func (t *Target) Report(start time.Time, dur time.Duration, err error) {
	t.mtx.Lock()
	defer t.mtx.Unlock()

	if err == nil {
		t.health = HealthGood
	} else {
		t.health = HealthBad
	}

	t.lastError = err
	t.lastScrape = start
	t.lastScrapeDuration = dur
}

// LastError returns the error encountered during the last scrape.
func (t *Target) LastError() error {
	t.mtx.RLock()
	defer t.mtx.RUnlock()

	return t.lastError
}

// LastScrape returns the time of the last scrape.
func (t *Target) LastScrape() time.Time {
	t.mtx.RLock()
	defer t.mtx.RUnlock()

	return t.lastScrape
}

// LastScrapeDuration returns how long the last scrape of the target took.
func (t *Target) LastScrapeDuration() time.Duration {
	t.mtx.RLock()
	defer t.mtx.RUnlock()

	return t.lastScrapeDuration
}

// Health returns the last known health state of the target.
func (t *Target) Health() TargetHealth {
	t.mtx.RLock()
	defer t.mtx.RUnlock()

	return t.health
}

// intervalAndTimeout returns the interval and timeout derived from
// the target's labels.
func (t *Target) intervalAndTimeout(defaultInterval, defaultDuration time.Duration) (time.Duration, time.Duration, error) {
	t.mtx.RLock()
	defer t.mtx.RUnlock()

	intervalLabel := t.labels.Get(model.ScrapeIntervalLabel)
	interval, err := model.ParseDuration(intervalLabel)
	if err != nil {
		return defaultInterval, defaultDuration, fmt.Errorf("error parsing interval label %q: %w", intervalLabel, err)
	}
	timeoutLabel := t.labels.Get(model.ScrapeTimeoutLabel)
	timeout, err := model.ParseDuration(timeoutLabel)
	if err != nil {
		return defaultInterval, defaultDuration, fmt.Errorf("error parsing timeout label %q: %w", timeoutLabel, err)
	}

	return time.Duration(interval), time.Duration(timeout), nil
}
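
// exampleIntervalOverride is an illustrative sketch, not part of the scrape
// loop; the durations are made up. It shows how per-target labels (set e.g.
// via relabeling) override the configured defaults.
func exampleIntervalOverride() {
	t := NewTarget(labels.FromStrings(
		model.ScrapeIntervalLabel, "30s",
		model.ScrapeTimeoutLabel, "5s",
	), labels.EmptyLabels(), nil)
	interval, timeout, err := t.intervalAndTimeout(time.Minute, 10*time.Second)
	fmt.Println(interval, timeout, err) // 30s 5s <nil>
}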

// GetValue gets a label value from the entire label set.
func (t *Target) GetValue(name string) string {
	return t.labels.Get(name)
}

// Targets is a sortable list of targets.
type Targets []*Target

func (ts Targets) Len() int           { return len(ts) }
func (ts Targets) Less(i, j int) bool { return ts[i].URL().String() < ts[j].URL().String() }
func (ts Targets) Swap(i, j int)      { ts[i], ts[j] = ts[j], ts[i] }

var (
	errSampleLimit = errors.New("sample limit exceeded")
	errBucketLimit = errors.New("histogram bucket limit exceeded")
)

// limitAppender limits the number of total appended samples in a batch.
type limitAppender struct {
	storage.Appender

	limit int
	i     int
}

func (app *limitAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
	if !value.IsStaleNaN(v) {
		app.i++
		if app.i > app.limit {
			return 0, errSampleLimit
		}
	}
	ref, err := app.Appender.Append(ref, lset, t, v)
	if err != nil {
		return 0, err
	}
	return ref, nil
}
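
// wrapWithSampleLimit is an illustrative helper (assumed, not part of the
// scrape loop here): limits are enforced by wrapping whichever Appender the
// scrape loop obtained, so callers keep using the plain storage.Appender
// interface.
func wrapWithSampleLimit(app storage.Appender, limit int) storage.Appender {
	if limit <= 0 {
		// A sample_limit of 0 means "no limit" in the scrape configuration.
		return app
	}
	return &limitAppender{Appender: app, limit: limit}
}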

// timeLimitAppender rejects any sample whose timestamp is beyond maxTime,
// returning storage.ErrOutOfBounds.
type timeLimitAppender struct {
	storage.Appender

	maxTime int64
}

func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
	if t > app.maxTime {
		return 0, storage.ErrOutOfBounds
	}

	ref, err := app.Appender.Append(ref, lset, t, v)
	if err != nil {
		return 0, err
	}
	return ref, nil
}

// bucketLimitAppender limits the number of buckets in each appended native
// histogram, reducing the histogram's resolution where possible.
type bucketLimitAppender struct {
	storage.Appender

	limit int
}

func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
	if h != nil {
		// Return with an early error if the histogram has too many buckets and the
		// schema is not exponential, in which case we can't reduce the resolution.
		if len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit && !histogram.IsExponentialSchema(h.Schema) {
			return 0, errBucketLimit
		}
		for len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit {
			if h.Schema <= histogram.ExponentialSchemaMin {
				return 0, errBucketLimit
			}
			h = h.ReduceResolution(h.Schema - 1)
		}
	}
	if fh != nil {
		// Return with an early error if the histogram has too many buckets and the
		// schema is not exponential, in which case we can't reduce the resolution.
		if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit && !histogram.IsExponentialSchema(fh.Schema) {
			return 0, errBucketLimit
		}
		for len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit {
			if fh.Schema <= histogram.ExponentialSchemaMin {
				return 0, errBucketLimit
			}
			fh = fh.ReduceResolution(fh.Schema - 1)
		}
	}
	ref, err := app.Appender.AppendHistogram(ref, lset, t, h, fh)
	if err != nil {
		return 0, err
	}
	return ref, nil
}
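
// exampleBucketReduction is an illustrative sketch with made-up histogram
// values: each one-step schema reduction merges adjacent bucket pairs, so a
// histogram with an exponential schema can be brought under the bucket limit
// without dropping any observations.
func exampleBucketReduction() {
	h := &histogram.Histogram{
		Schema:          1,
		Count:           4,
		Sum:             10,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 4}},
		PositiveBuckets: []int64{1, 0, 0, 0}, // delta-encoded: four buckets of 1
	}
	h = h.ReduceResolution(h.Schema - 1)
	fmt.Println(len(h.PositiveBuckets)) // fewer buckets than before
}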

// maxSchemaAppender caps the schema of appended native histograms at
// maxSchema, reducing their resolution where necessary.
type maxSchemaAppender struct {
	storage.Appender

	maxSchema int32
}

func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
	if h != nil {
		if histogram.IsExponentialSchema(h.Schema) && h.Schema > app.maxSchema {
			h = h.ReduceResolution(app.maxSchema)
		}
	}
	if fh != nil {
		if histogram.IsExponentialSchema(fh.Schema) && fh.Schema > app.maxSchema {
			fh = fh.ReduceResolution(app.maxSchema)
		}
	}
	ref, err := app.Appender.AppendHistogram(ref, lset, t, h, fh)
	if err != nil {
		return 0, err
	}
	return ref, nil
}

// PopulateLabels builds a label set from the given label set and scrape configuration.
// It returns the label set before relabeling was applied as the second return
// value. If the target is dropped during relabeling, the first return value is
// empty while the second still holds the discovered labels.
func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort bool) (res, orig labels.Labels, err error) {
	// Copy labels into the labelset for the target if they are not set already.
	scrapeLabels := []labels.Label{
		{Name: model.JobLabel, Value: cfg.JobName},
		{Name: model.ScrapeIntervalLabel, Value: cfg.ScrapeInterval.String()},
		{Name: model.ScrapeTimeoutLabel, Value: cfg.ScrapeTimeout.String()},
		{Name: model.MetricsPathLabel, Value: cfg.MetricsPath},
		{Name: model.SchemeLabel, Value: cfg.Scheme},
	}

	for _, l := range scrapeLabels {
		if lb.Get(l.Name) == "" {
			lb.Set(l.Name, l.Value)
		}
	}
	// Encode scrape query parameters as labels.
	for k, v := range cfg.Params {
		if name := model.ParamLabelPrefix + k; len(v) > 0 && lb.Get(name) == "" {
			lb.Set(name, v[0])
		}
	}

	preRelabelLabels := lb.Labels()
	keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...)

	// Check if the target was dropped.
	if !keep {
		return labels.EmptyLabels(), preRelabelLabels, nil
	}
	if v := lb.Get(model.AddressLabel); v == "" {
		return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("no address")
	}

	// addPort checks whether we should add a default port to the address.
	// If the address is not valid, we don't append a port either.
	addPort := func(s string) (string, string, bool) {
		// If we can split, a port exists and we don't have to add one.
		if host, port, err := net.SplitHostPort(s); err == nil {
			return host, port, false
		}
		// If adding a port makes it valid, the previous error
		// was not due to an invalid address and we can append a port.
		_, _, err := net.SplitHostPort(s + ":1234")
		return "", "", err == nil
	}

	addr := lb.Get(model.AddressLabel)
	scheme := lb.Get(model.SchemeLabel)
	host, port, add := addPort(addr)
	// If the address has no trailing port, infer it from the scheme
	// unless the no-default-scrape-port feature flag is set.
	if !noDefaultPort && add {
		// Addresses reaching this point are already wrapped in [] if necessary.
		switch scheme {
		case "http", "":
			addr += ":80"
		case "https":
			addr += ":443"
		default:
			return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("invalid scheme: %q", cfg.Scheme)
		}
		lb.Set(model.AddressLabel, addr)
	}

	if noDefaultPort {
		// If it's an address with a trailing default port and the
		// no-default-scrape-port flag is set, remove the port.
		switch port {
		case "80":
			if scheme == "http" {
				lb.Set(model.AddressLabel, host)
			}
		case "443":
			if scheme == "https" {
				lb.Set(model.AddressLabel, host)
			}
		}
	}

	if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil {
		return labels.EmptyLabels(), labels.EmptyLabels(), err
	}

	interval := lb.Get(model.ScrapeIntervalLabel)
	intervalDuration, err := model.ParseDuration(interval)
	if err != nil {
		return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("error parsing scrape interval: %w", err)
	}
	if time.Duration(intervalDuration) == 0 {
		return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("scrape interval cannot be 0")
	}

	timeout := lb.Get(model.ScrapeTimeoutLabel)
	timeoutDuration, err := model.ParseDuration(timeout)
	if err != nil {
		return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("error parsing scrape timeout: %w", err)
	}
	if time.Duration(timeoutDuration) == 0 {
		return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("scrape timeout cannot be 0")
	}

	if timeoutDuration > intervalDuration {
		return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("scrape timeout cannot be greater than scrape interval (%q > %q)", timeout, interval)
	}

	// Meta labels are deleted after relabeling. Other internal labels propagate
	// to the target, which decides whether they become part of its label set.
	lb.Range(func(l labels.Label) {
		if strings.HasPrefix(l.Name, model.MetaLabelPrefix) {
			lb.Del(l.Name)
		}
	})

	// Default the instance label to the target address.
	if v := lb.Get(model.InstanceLabel); v == "" {
		lb.Set(model.InstanceLabel, addr)
	}

	res = lb.Labels()
	err = res.Validate(func(l labels.Label) error {
		// Check label values are valid, drop the target if not.
		if !model.LabelValue(l.Value).IsValid() {
			return fmt.Errorf("invalid label value for %q: %q", l.Name, l.Value)
		}
		return nil
	})
	if err != nil {
		return labels.EmptyLabels(), labels.EmptyLabels(), err
	}
	return res, preRelabelLabels, nil
}
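
// examplePopulateLabels is an illustrative sketch with a made-up job and
// address: with defaults applied, a bare host address picks up the job,
// scheme, metrics path, interval/timeout labels, the inferred port, and an
// instance label.
func examplePopulateLabels() (labels.Labels, error) {
	cfg := &config.ScrapeConfig{
		JobName:        "node",
		Scheme:         "http",
		MetricsPath:    "/metrics",
		ScrapeInterval: model.Duration(time.Minute),
		ScrapeTimeout:  model.Duration(10 * time.Second),
	}
	lb := labels.NewBuilder(labels.FromStrings(model.AddressLabel, "10.0.0.1"))
	res, _, err := PopulateLabels(lb, cfg, false)
	// res now contains __address__="10.0.0.1:80" and instance="10.0.0.1:80"
	// alongside the job, scheme, path, interval, and timeout labels.
	return res, err
}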

// TargetsFromGroup builds targets based on the given TargetGroup and config.
func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefaultPort bool, targets []*Target, lb *labels.Builder) ([]*Target, []error) {
	targets = targets[:0]
	failures := []error{}

	for i, tlset := range tg.Targets {
		lb.Reset(labels.EmptyLabels())

		for ln, lv := range tlset {
			lb.Set(string(ln), string(lv))
		}
		for ln, lv := range tg.Labels {
			if _, ok := tlset[ln]; !ok {
				lb.Set(string(ln), string(lv))
			}
		}

		lset, origLabels, err := PopulateLabels(lb, cfg, noDefaultPort)
		if err != nil {
			failures = append(failures, fmt.Errorf("instance %d in group %s: %w", i, tg, err))
		}
		if !lset.IsEmpty() || !origLabels.IsEmpty() {
			targets = append(targets, NewTarget(lset, origLabels, cfg.Params))
		}
	}
	return targets, failures
}
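
// exampleTargetsFromGroup is an illustrative sketch with a made-up group:
// per-target labels take precedence over group-wide labels, and each entry in
// tg.Targets becomes one scrape target.
func exampleTargetsFromGroup(cfg *config.ScrapeConfig) ([]*Target, []error) {
	tg := &targetgroup.Group{
		Targets: []model.LabelSet{
			{model.AddressLabel: "10.0.0.1:9100"},
			{model.AddressLabel: "10.0.0.2:9100"},
		},
		Labels: model.LabelSet{"env": "prod"},
	}
	lb := labels.NewBuilder(labels.EmptyLabels())
	return TargetsFromGroup(tg, cfg, false, nil, lb)
}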