Merge pull request #14160 from alex-kattathra-johnson/issue-13959

Remove no-default-scrape-port featureFlag
Björn Rabenstein 2024-09-26 18:45:56 +02:00 committed by GitHub
commit f74722841b
12 changed files with 32 additions and 100 deletions
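
With this change, scrape target addresses are always used exactly as discovered: no default port is appended to a port-less `__address__`, and (unlike under the old feature flag) an explicitly configured `:80`/`:443` is no longer stripped, as the updated TestPopulateLabels cases below verify. The `noDefaultPort` parameter is dropped from `scrape.PopulateLabels`, `scrape.TargetsFromGroup`, and promtool's `CheckSD` accordingly.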


@@ -217,9 +217,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 		case "concurrent-rule-eval":
 			c.enableConcurrentRuleEval = true
 			level.Info(logger).Log("msg", "Experimental concurrent rule evaluation enabled.")
-		case "no-default-scrape-port":
-			c.scrape.NoDefaultPort = true
-			level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.")
 		case "promql-experimental-functions":
 			parser.EnableExperimentalFunctions = true
 			level.Info(logger).Log("msg", "Experimental PromQL functions enabled.")
@@ -474,7 +471,7 @@ func main() {
 	a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
 		Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)
-	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
 		Default("").StringsVar(&cfg.featureList)
 	a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode)


@@ -291,7 +291,7 @@ func main() {
 	promQLLabelsDeleteQuery := promQLLabelsDeleteCmd.Arg("query", "PromQL query.").Required().String()
 	promQLLabelsDeleteName := promQLLabelsDeleteCmd.Arg("name", "Name of the label to delete.").Required().String()

-	featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings()
+	featureList := app.Flag("enable-feature", "Comma separated feature names to enable. Currently unused.").Default("").Strings()

 	documentationCmd := app.Command("write-documentation", "Generate command line documentation. Internal use.").Hidden()
@@ -321,24 +321,21 @@ func main() {
 		}
 	}

-	var noDefaultScrapePort bool
 	for _, f := range *featureList {
 		opts := strings.Split(f, ",")
 		for _, o := range opts {
 			switch o {
-			case "no-default-scrape-port":
-				noDefaultScrapePort = true
 			case "":
 				continue
 			default:
-				fmt.Printf("  WARNING: Unknown option for --enable-feature: %q\n", o)
+				fmt.Printf("  WARNING: --enable-feature is currently a no-op")
 			}
 		}
 	}

 	switch parsedCmd {
 	case sdCheckCmd.FullCommand():
-		os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, noDefaultScrapePort, prometheus.DefaultRegisterer))
+		os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, prometheus.DefaultRegisterer))
 	case checkConfigCmd.FullCommand():
 		os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...))
@@ -1219,7 +1216,7 @@ func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *c
 	lb := labels.NewBuilder(labels.EmptyLabels())
 	for _, tg := range targetGroups {
 		var failures []error
-		targets, failures = scrape.TargetsFromGroup(tg, scfg, false, targets, lb)
+		targets, failures = scrape.TargetsFromGroup(tg, scfg, targets, lb)
 		if len(failures) > 0 {
 			first := failures[0]
 			return first


@@ -38,7 +38,7 @@ type sdCheckResult struct {
 }

 // CheckSD performs service discovery for the given job name and reports the results.
-func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefaultScrapePort bool, registerer prometheus.Registerer) int {
+func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, registerer prometheus.Registerer) int {
 	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

 	cfg, err := config.LoadFile(sdConfigFiles, false, false, logger)
@@ -114,7 +114,7 @@ outerLoop:
 	}

 	results := []sdCheckResult{}
 	for _, tgs := range sdCheckResults {
-		results = append(results, getSDCheckResult(tgs, scrapeConfig, noDefaultScrapePort)...)
+		results = append(results, getSDCheckResult(tgs, scrapeConfig)...)
 	}

 	res, err := json.MarshalIndent(results, "", "  ")
@@ -127,7 +127,7 @@ outerLoop:
 	return successExitCode
 }

-func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig, noDefaultScrapePort bool) []sdCheckResult {
+func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig) []sdCheckResult {
 	sdCheckResults := []sdCheckResult{}
 	lb := labels.NewBuilder(labels.EmptyLabels())
 	for _, targetGroup := range targetGroups {
@@ -144,7 +144,7 @@ func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.Sc
 			}
 		}

-		res, orig, err := scrape.PopulateLabels(lb, scrapeConfig, noDefaultScrapePort)
+		res, orig, err := scrape.PopulateLabels(lb, scrapeConfig)
 		result := sdCheckResult{
 			DiscoveredLabels: orig,
 			Labels:           res,


@@ -70,5 +70,5 @@ func TestSDCheckResult(t *testing.T) {
 		},
 	}

-	testutil.RequireEqual(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig, true))
+	testutil.RequireEqual(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig))
 }


@@ -56,7 +56,7 @@ The Prometheus monitoring server
 | <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
 | <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
 | <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
-| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...</code> | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
+| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...</code> | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
 | <code class="text-nowrap">--agent</code> | Run Prometheus in 'Agent mode'. | |
 | <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
 | <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |


@@ -15,7 +15,7 @@ Tooling for the Prometheus monitoring system.
 | <code class="text-nowrap">-h</code>, <code class="text-nowrap">--help</code> | Show context-sensitive help (also try --help-long and --help-man). |
 | <code class="text-nowrap">--version</code> | Show application version. |
 | <code class="text-nowrap">--experimental</code> | Enable experimental commands. |
-| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...</code> | Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details. |
+| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...</code> | Comma separated feature names to enable. Currently unused. |


@@ -71,15 +71,6 @@ When enabled, the GOMEMLIMIT variable is automatically set to match the Linux co
 There is also an additional tuning flag, `--auto-gomemlimit.ratio`, which allows controlling how much of the memory is used for Prometheus. The remainder is reserved for memory outside the process. For example, kernel page cache. Page cache is important for Prometheus TSDB query performance. The default is `0.9`, which means 90% of the memory limit will be used for Prometheus.

-## No default scrape port
-
-`--enable-feature=no-default-scrape-port`
-
-When enabled, the default ports for HTTP (`:80`) or HTTPS (`:443`) will _not_ be added to
-the address used to scrape a target (the value of the `__address__` label), contrary to the default behavior.
-In addition, if a default HTTP or HTTPS port has already been added either in a static configuration or
-by a service discovery mechanism and the respective scheme is specified (`http` or `https`), that port will be removed.
-
 ## Native Histograms

 `--enable-feature=native-histograms`
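
With the flag gone, `scrape.PopulateLabels` keeps a port-less `__address__` verbatim instead of inferring `:80`/`:443` from the scheme. A minimal sketch of the post-change behavior, assuming the updated API from this diff (the job name and address are placeholders):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/scrape"
)

func main() {
	cfg := &config.ScrapeConfig{
		JobName:        "example-job", // placeholder job name
		Scheme:         "https",
		MetricsPath:    "/metrics",
		ScrapeInterval: model.Duration(time.Second),
		ScrapeTimeout:  model.Duration(time.Second),
	}
	// An address discovered without an explicit port.
	lb := labels.NewBuilder(labels.FromStrings(model.AddressLabel, "example.com"))
	res, _, err := scrape.PopulateLabels(lb, cfg)
	if err != nil {
		panic(err)
	}
	// Prints "example.com": no ":443" is appended anymore.
	fmt.Println(res.Get(model.AddressLabel))
}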


@@ -70,8 +70,7 @@ func NewManager(o *Options, logger log.Logger, newScrapeFailureLogger func(strin
 // Options are the configuration parameters to the scrape manager.
 type Options struct {
-	ExtraMetrics  bool
-	NoDefaultPort bool
+	ExtraMetrics bool

 	// Option used by downstream scraper users like OpenTelemetry Collector
 	// to help lookup metric metadata. Should be false for Prometheus.
 	PassMetadataInContext bool


@@ -54,12 +54,11 @@ func init() {

 func TestPopulateLabels(t *testing.T) {
 	cases := []struct {
-		in            labels.Labels
-		cfg           *config.ScrapeConfig
-		noDefaultPort bool
-		res           labels.Labels
-		resOrig       labels.Labels
-		err           string
+		in      labels.Labels
+		cfg     *config.ScrapeConfig
+		res     labels.Labels
+		resOrig labels.Labels
+		err     string
 	}{
 		// Regular population of scrape config options.
 		{
@@ -113,8 +112,8 @@ func TestPopulateLabels(t *testing.T) {
 				ScrapeTimeout:  model.Duration(time.Second),
 			},
 			res: labels.FromMap(map[string]string{
-				model.AddressLabel:     "1.2.3.4:80",
-				model.InstanceLabel:    "1.2.3.4:80",
+				model.AddressLabel:     "1.2.3.4",
+				model.InstanceLabel:    "1.2.3.4",
 				model.SchemeLabel:      "http",
 				model.MetricsPathLabel: "/custom",
 				model.JobLabel:         "custom-job",
@@ -144,7 +143,7 @@ func TestPopulateLabels(t *testing.T) {
 				ScrapeTimeout:  model.Duration(time.Second),
 			},
 			res: labels.FromMap(map[string]string{
-				model.AddressLabel:     "[::1]:443",
+				model.AddressLabel:     "[::1]",
 				model.InstanceLabel:    "custom-instance",
 				model.SchemeLabel:      "https",
 				model.MetricsPathLabel: "/metrics",
@@ -367,7 +366,6 @@ func TestPopulateLabels(t *testing.T) {
 				ScrapeInterval: model.Duration(time.Second),
 				ScrapeTimeout:  model.Duration(time.Second),
 			},
-			noDefaultPort: true,
 			res: labels.FromMap(map[string]string{
 				model.AddressLabel:  "1.2.3.4",
 				model.InstanceLabel: "1.2.3.4",
@@ -386,7 +384,7 @@ func TestPopulateLabels(t *testing.T) {
 				model.ScrapeTimeoutLabel: "1s",
 			}),
 		},
-		// Remove default port (http).
+		// verify that the default port is not removed (http).
 		{
 			in: labels.FromMap(map[string]string{
 				model.AddressLabel: "1.2.3.4:80",
@@ -398,9 +396,8 @@ func TestPopulateLabels(t *testing.T) {
 				ScrapeInterval: model.Duration(time.Second),
 				ScrapeTimeout:  model.Duration(time.Second),
 			},
-			noDefaultPort: true,
 			res: labels.FromMap(map[string]string{
-				model.AddressLabel:     "1.2.3.4",
+				model.AddressLabel:     "1.2.3.4:80",
 				model.InstanceLabel:    "1.2.3.4:80",
 				model.SchemeLabel:      "http",
 				model.MetricsPathLabel: "/metrics",
@@ -417,7 +414,7 @@ func TestPopulateLabels(t *testing.T) {
 				model.ScrapeTimeoutLabel: "1s",
 			}),
 		},
-		// Remove default port (https).
+		// verify that the default port is not removed (https).
 		{
 			in: labels.FromMap(map[string]string{
 				model.AddressLabel: "1.2.3.4:443",
@@ -429,9 +426,8 @@ func TestPopulateLabels(t *testing.T) {
 				ScrapeInterval: model.Duration(time.Second),
 				ScrapeTimeout:  model.Duration(time.Second),
 			},
-			noDefaultPort: true,
 			res: labels.FromMap(map[string]string{
-				model.AddressLabel:     "1.2.3.4",
+				model.AddressLabel:     "1.2.3.4:443",
 				model.InstanceLabel:    "1.2.3.4:443",
 				model.SchemeLabel:      "https",
 				model.MetricsPathLabel: "/metrics",
@@ -452,7 +448,7 @@ func TestPopulateLabels(t *testing.T) {
 	for _, c := range cases {
 		in := c.in.Copy()

-		res, orig, err := PopulateLabels(labels.NewBuilder(c.in), c.cfg, c.noDefaultPort)
+		res, orig, err := PopulateLabels(labels.NewBuilder(c.in), c.cfg)
 		if c.err != "" {
 			require.EqualError(t, err, c.err)
 		} else {


@@ -87,8 +87,6 @@ type scrapePool struct {
 	// Constructor for new scrape loops. This is settable for testing convenience.
 	newLoop func(scrapeLoopOptions) loop

-	noDefaultPort bool
-
 	metrics *scrapeMetrics

 	scrapeFailureLogger log.Logger
@@ -149,7 +147,6 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
 		logger:        logger,
 		metrics:       metrics,
 		httpOpts:      options.HTTPClientOptions,
-		noDefaultPort: options.NoDefaultPort,
 	}
 	sp.newLoop = func(opts scrapeLoopOptions) loop {
 		// Update the targets retrieval function for metadata to a new scrape cache.
@@ -429,7 +426,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
 	sp.droppedTargets = []*Target{}
 	sp.droppedTargetsCount = 0
 	for _, tg := range tgs {
-		targets, failures := TargetsFromGroup(tg, sp.config, sp.noDefaultPort, targets, lb)
+		targets, failures := TargetsFromGroup(tg, sp.config, targets, lb)
 		for _, err := range failures {
 			level.Error(sp.logger).Log("msg", "Creating target failed", "err", err)
 		}
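
This also shrinks `scrape.Options`, a breaking API change for downstream embedders of the scrape manager (such as the OpenTelemetry Collector mentioned in the comment above): code that set `NoDefaultPort` must simply drop the field. A sketch of the post-change construction, with placeholder values:

package main

import "github.com/prometheus/prometheus/scrape"

func main() {
	// NoDefaultPort no longer exists on scrape.Options after this change;
	// the remaining fields shown here are taken from the diff above.
	opts := &scrape.Options{
		ExtraMetrics:          false,
		PassMetadataInContext: true, // used by downstream scrapers for metadata lookup
	}
	_ = opts
}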


@@ -17,7 +17,6 @@ import (
 	"errors"
 	"fmt"
 	"hash/fnv"
-	"net"
 	"net/url"
 	"strings"
 	"sync"
@@ -424,7 +423,7 @@ func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels
 // PopulateLabels builds a label set from the given label set and scrape configuration.
 // It returns a label set before relabeling was applied as the second return value.
 // Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling.
-func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort bool) (res, orig labels.Labels, err error) {
+func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) {
 	// Copy labels into the labelset for the target if they are not set already.
 	scrapeLabels := []labels.Label{
 		{Name: model.JobLabel, Value: cfg.JobName},
@@ -457,51 +456,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort
 		return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("no address")
 	}

-	// addPort checks whether we should add a default port to the address.
-	// If the address is not valid, we don't append a port either.
-	addPort := func(s string) (string, string, bool) {
-		// If we can split, a port exists and we don't have to add one.
-		if host, port, err := net.SplitHostPort(s); err == nil {
-			return host, port, false
-		}
-		// If adding a port makes it valid, the previous error
-		// was not due to an invalid address and we can append a port.
-		_, _, err := net.SplitHostPort(s + ":1234")
-		return "", "", err == nil
-	}
-
 	addr := lb.Get(model.AddressLabel)
-	scheme := lb.Get(model.SchemeLabel)
-	host, port, add := addPort(addr)
-	// If it's an address with no trailing port, infer it based on the used scheme
-	// unless the no-default-scrape-port feature flag is present.
-	if !noDefaultPort && add {
-		// Addresses reaching this point are already wrapped in [] if necessary.
-		switch scheme {
-		case "http", "":
-			addr += ":80"
-		case "https":
-			addr += ":443"
-		default:
-			return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("invalid scheme: %q", cfg.Scheme)
-		}
-		lb.Set(model.AddressLabel, addr)
-	}
-
-	if noDefaultPort {
-		// If it's an address with a trailing default port and the
-		// no-default-scrape-port flag is present, remove the port.
-		switch port {
-		case "80":
-			if scheme == "http" {
-				lb.Set(model.AddressLabel, host)
-			}
-		case "443":
-			if scheme == "https" {
-				lb.Set(model.AddressLabel, host)
-			}
-		}
-	}

 	if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil {
 		return labels.EmptyLabels(), labels.EmptyLabels(), err
@@ -557,7 +512,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort
 }

 // TargetsFromGroup builds targets based on the given TargetGroup and config.
-func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefaultPort bool, targets []*Target, lb *labels.Builder) ([]*Target, []error) {
+func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, targets []*Target, lb *labels.Builder) ([]*Target, []error) {
 	targets = targets[:0]
 	failures := []error{}
@@ -573,7 +528,7 @@ func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefault
 			}
 		}

-		lset, origLabels, err := PopulateLabels(lb, cfg, noDefaultPort)
+		lset, origLabels, err := PopulateLabels(lb, cfg)
 		if err != nil {
 			failures = append(failures, fmt.Errorf("instance %d in group %s: %w", i, tg, err))
 		}
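
Embedders that relied on the implicit default ports can reproduce the deleted logic themselves before handing addresses to `TargetsFromGroup`. A minimal sketch transcribed from the removed lines above; the helper name `addDefaultPort` and its package are hypothetical:

package scrapecompat

import (
	"fmt"
	"net"
)

// addDefaultPort restores the pre-change behavior: append :80 or :443 to a
// port-less address based on the scheme, leaving other addresses untouched.
func addDefaultPort(addr, scheme string) (string, error) {
	// If the address already splits into host:port, there is nothing to add.
	if _, _, err := net.SplitHostPort(addr); err == nil {
		return addr, nil
	}
	// If appending a port does not make it valid either, the address is
	// malformed in some other way; leave it for later validation.
	if _, _, err := net.SplitHostPort(addr + ":1234"); err != nil {
		return addr, nil
	}
	// Addresses reaching this point are already wrapped in [] if necessary.
	switch scheme {
	case "http", "":
		return addr + ":80", nil
	case "https":
		return addr + ":443", nil
	default:
		return "", fmt.Errorf("invalid scheme: %q", scheme)
	}
}

For example, addDefaultPort("[::1]", "https") yields "[::1]:443", matching what PopulateLabels produced before this diff.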


@@ -348,7 +348,7 @@ func TestTargetsFromGroup(t *testing.T) {
 		ScrapeInterval: model.Duration(1 * time.Minute),
 	}
 	lb := labels.NewBuilder(labels.EmptyLabels())
-	targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &cfg, false, nil, lb)
+	targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &cfg, nil, lb)
 	require.Len(t, targets, 1)
 	require.Len(t, failures, 1)
 	require.EqualError(t, failures[0], expectedError)
@@ -435,7 +435,7 @@ scrape_configs:
 	lb := labels.NewBuilder(labels.EmptyLabels())
 	group := &targetgroup.Group{Targets: targets}
 	for i := 0; i < b.N; i++ {
-		tgets, _ = TargetsFromGroup(group, config.ScrapeConfigs[0], false, tgets, lb)
+		tgets, _ = TargetsFromGroup(group, config.ScrapeConfigs[0], tgets, lb)
 		if len(targets) != nTargets {
 			b.Fatalf("Expected %d targets, got %d", nTargets, len(targets))
 		}