mirror of
https://github.com/prometheus/prometheus.git
synced 2025-03-05 20:59:13 -08:00
s/scrape_classic_histograms/always_scrape_classic_histograms (3.0 breaking change) (#15178)
This is for readability, especially when we start converting to the nhcb option. See discussion https://cloud-native.slack.com/archives/C077Z4V13AM/p1729155873397889 Signed-off-by: bwplotka <bwplotka@gmail.com>
This commit is contained in:
parent
2cabd1b707
commit
efc43d0714
|
@ -163,13 +163,13 @@ var (
|
|||
// DefaultScrapeConfig is the default scrape configuration.
|
||||
DefaultScrapeConfig = ScrapeConfig{
|
||||
// ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals.
|
||||
ScrapeClassicHistograms: false,
|
||||
MetricsPath: "/metrics",
|
||||
Scheme: "http",
|
||||
HonorLabels: false,
|
||||
HonorTimestamps: true,
|
||||
HTTPClientConfig: config.DefaultHTTPClientConfig,
|
||||
EnableCompression: true,
|
||||
AlwaysScrapeClassicHistograms: false,
|
||||
MetricsPath: "/metrics",
|
||||
Scheme: "http",
|
||||
HonorLabels: false,
|
||||
HonorTimestamps: true,
|
||||
HTTPClientConfig: config.DefaultHTTPClientConfig,
|
||||
EnableCompression: true,
|
||||
}
|
||||
|
||||
// DefaultAlertmanagerConfig is the default alertmanager configuration.
|
||||
|
@ -631,8 +631,8 @@ type ScrapeConfig struct {
|
|||
// Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
|
||||
// OpenMetricsText1.0.0, PrometheusText0.0.4.
|
||||
ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
|
||||
// Whether to scrape a classic histogram that is also exposed as a native histogram.
|
||||
ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"`
|
||||
// Whether to scrape a classic histogram, even if it is also exposed as a native histogram.
|
||||
AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"`
|
||||
// File to which scrape failures are logged.
|
||||
ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"`
|
||||
// The HTTP resource path on which to fetch metrics from targets.
|
||||
|
|
|
@ -215,9 +215,9 @@ job_name: <job_name>
|
|||
# OpenMetricsText1.0.0, PrometheusText0.0.4.
|
||||
[ scrape_protocols: [<string>, ...] | default = <global_config.scrape_protocols> ]
|
||||
|
||||
# Whether to scrape a classic histogram that is also exposed as a native
|
||||
# Whether to scrape a classic histogram, even if it is also exposed as a native
|
||||
# histogram (has no effect without --enable-feature=native-histograms).
|
||||
[ scrape_classic_histograms: <boolean> | default = false ]
|
||||
[ always_scrape_classic_histograms: <boolean> | default = false ]
|
||||
|
||||
# The HTTP resource path on which to fetch metrics from targets.
|
||||
[ metrics_path: <path> | default = /metrics ]
|
||||
|
|
|
@ -84,7 +84,7 @@ those classic histograms that do not come with a corresponding native
|
|||
histogram. However, if a native histogram is present, Prometheus will ignore
|
||||
the corresponding classic histogram, with the notable exception of exemplars,
|
||||
which are always ingested. To keep the classic histograms as well, enable
|
||||
`scrape_classic_histograms` in the scrape job.
|
||||
`always_scrape_classic_histograms` in the scrape job.
|
||||
|
||||
_Note about the format of `le` and `quantile` label values:_
|
||||
|
||||
|
|
|
@ -112,7 +112,7 @@ type scrapeLoopOptions struct {
|
|||
trackTimestampsStaleness bool
|
||||
interval time.Duration
|
||||
timeout time.Duration
|
||||
scrapeClassicHistograms bool
|
||||
alwaysScrapeClassicHist bool
|
||||
validationScheme model.ValidationScheme
|
||||
|
||||
mrc []*relabel.Config
|
||||
|
@ -179,7 +179,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
|
|||
opts.labelLimits,
|
||||
opts.interval,
|
||||
opts.timeout,
|
||||
opts.scrapeClassicHistograms,
|
||||
opts.alwaysScrapeClassicHist,
|
||||
options.EnableNativeHistogramsIngestion,
|
||||
options.EnableCreatedTimestampZeroIngestion,
|
||||
options.ExtraMetrics,
|
||||
|
@ -480,7 +480,7 @@ func (sp *scrapePool) sync(targets []*Target) {
|
|||
enableCompression = sp.config.EnableCompression
|
||||
trackTimestampsStaleness = sp.config.TrackTimestampsStaleness
|
||||
mrc = sp.config.MetricRelabelConfigs
|
||||
scrapeClassicHistograms = sp.config.ScrapeClassicHistograms
|
||||
alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistograms
|
||||
)
|
||||
|
||||
validationScheme := model.UTF8Validation
|
||||
|
@ -521,7 +521,7 @@ func (sp *scrapePool) sync(targets []*Target) {
|
|||
mrc: mrc,
|
||||
interval: interval,
|
||||
timeout: timeout,
|
||||
scrapeClassicHistograms: scrapeClassicHistograms,
|
||||
alwaysScrapeClassicHist: alwaysScrapeClassicHist,
|
||||
validationScheme: validationScheme,
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -883,7 +883,7 @@ type scrapeLoop struct {
|
|||
labelLimits *labelLimits
|
||||
interval time.Duration
|
||||
timeout time.Duration
|
||||
scrapeClassicHistograms bool
|
||||
alwaysScrapeClassicHist bool
|
||||
validationScheme model.ValidationScheme
|
||||
|
||||
// Feature flagged options.
|
||||
|
@ -1183,7 +1183,7 @@ func newScrapeLoop(ctx context.Context,
|
|||
labelLimits *labelLimits,
|
||||
interval time.Duration,
|
||||
timeout time.Duration,
|
||||
scrapeClassicHistograms bool,
|
||||
alwaysScrapeClassicHist bool,
|
||||
enableNativeHistogramIngestion bool,
|
||||
enableCTZeroIngestion bool,
|
||||
reportExtraMetrics bool,
|
||||
|
@ -1237,7 +1237,7 @@ func newScrapeLoop(ctx context.Context,
|
|||
labelLimits: labelLimits,
|
||||
interval: interval,
|
||||
timeout: timeout,
|
||||
scrapeClassicHistograms: scrapeClassicHistograms,
|
||||
alwaysScrapeClassicHist: alwaysScrapeClassicHist,
|
||||
enableNativeHistogramIngestion: enableNativeHistogramIngestion,
|
||||
enableCTZeroIngestion: enableCTZeroIngestion,
|
||||
reportExtraMetrics: reportExtraMetrics,
|
||||
|
@ -1537,7 +1537,7 @@ type appendErrors struct {
|
|||
}
|
||||
|
||||
func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) {
|
||||
p, err := textparse.New(b, contentType, sl.scrapeClassicHistograms, sl.enableCTZeroIngestion, sl.symbolTable)
|
||||
p, err := textparse.New(b, contentType, sl.alwaysScrapeClassicHist, sl.enableCTZeroIngestion, sl.symbolTable)
|
||||
if err != nil {
|
||||
sl.l.Debug(
|
||||
"Invalid content type on scrape, using prometheus parser as fallback.",
|
||||
|
|
|
@ -1846,7 +1846,7 @@ func TestScrapeLoopAppendStalenessIfTrackTimestampStaleness(t *testing.T) {
|
|||
func TestScrapeLoopAppendExemplar(t *testing.T) {
|
||||
tests := []struct {
|
||||
title string
|
||||
scrapeClassicHistograms bool
|
||||
alwaysScrapeClassicHist bool
|
||||
enableNativeHistogramsIngestion bool
|
||||
scrapeText string
|
||||
contentType string
|
||||
|
@ -2115,7 +2115,7 @@ metric: <
|
|||
>
|
||||
|
||||
`,
|
||||
scrapeClassicHistograms: true,
|
||||
alwaysScrapeClassicHist: true,
|
||||
contentType: "application/vnd.google.protobuf",
|
||||
floats: []floatSample{
|
||||
{metric: labels.FromStrings("__name__", "test_histogram_count"), t: 1234568, f: 175},
|
||||
|
@ -2177,7 +2177,7 @@ metric: <
|
|||
sl.reportSampleMutator = func(l labels.Labels) labels.Labels {
|
||||
return mutateReportSampleLabels(l, discoveryLabels)
|
||||
}
|
||||
sl.scrapeClassicHistograms = test.scrapeClassicHistograms
|
||||
sl.alwaysScrapeClassicHist = test.alwaysScrapeClassicHist
|
||||
|
||||
now := time.Now()
|
||||
|
||||
|
|
Loading…
Reference in a new issue