diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index 7d9106b33..2a96a6ba7 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -217,9 +217,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
case "concurrent-rule-eval":
c.enableConcurrentRuleEval = true
level.Info(logger).Log("msg", "Experimental concurrent rule evaluation enabled.")
- case "no-default-scrape-port":
- c.scrape.NoDefaultPort = true
- level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.")
case "promql-experimental-functions":
parser.EnableExperimentalFunctions = true
level.Info(logger).Log("msg", "Experimental PromQL functions enabled.")
@@ -474,7 +471,7 @@ func main() {
a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)
- a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+ a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
Default("").StringsVar(&cfg.featureList)
a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode)
diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go
index 4d4cf6c5d..48f9be930 100644
--- a/cmd/promtool/main.go
+++ b/cmd/promtool/main.go
@@ -291,7 +291,7 @@ func main() {
promQLLabelsDeleteQuery := promQLLabelsDeleteCmd.Arg("query", "PromQL query.").Required().String()
promQLLabelsDeleteName := promQLLabelsDeleteCmd.Arg("name", "Name of the label to delete.").Required().String()
- featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings()
+ featureList := app.Flag("enable-feature", "Comma separated feature names to enable. Currently unused.").Default("").Strings()
documentationCmd := app.Command("write-documentation", "Generate command line documentation. Internal use.").Hidden()
@@ -321,24 +321,21 @@ func main() {
}
}
- var noDefaultScrapePort bool
for _, f := range *featureList {
opts := strings.Split(f, ",")
for _, o := range opts {
switch o {
- case "no-default-scrape-port":
- noDefaultScrapePort = true
case "":
continue
default:
- fmt.Printf(" WARNING: Unknown option for --enable-feature: %q\n", o)
+ fmt.Printf(" WARNING: --enable-feature is currently a no-op\n")
}
}
}
switch parsedCmd {
case sdCheckCmd.FullCommand():
- os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, noDefaultScrapePort, prometheus.DefaultRegisterer))
+ os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, prometheus.DefaultRegisterer))
case checkConfigCmd.FullCommand():
os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...))
@@ -1219,7 +1216,7 @@ func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *c
lb := labels.NewBuilder(labels.EmptyLabels())
for _, tg := range targetGroups {
var failures []error
- targets, failures = scrape.TargetsFromGroup(tg, scfg, false, targets, lb)
+ targets, failures = scrape.TargetsFromGroup(tg, scfg, targets, lb)
if len(failures) > 0 {
first := failures[0]
return first
diff --git a/cmd/promtool/sd.go b/cmd/promtool/sd.go
index e65262d43..6c0e896ff 100644
--- a/cmd/promtool/sd.go
+++ b/cmd/promtool/sd.go
@@ -38,7 +38,7 @@ type sdCheckResult struct {
}
// CheckSD performs service discovery for the given job name and reports the results.
-func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefaultScrapePort bool, registerer prometheus.Registerer) int {
+func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, registerer prometheus.Registerer) int {
logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
cfg, err := config.LoadFile(sdConfigFiles, false, false, logger)
@@ -114,7 +114,7 @@ outerLoop:
}
results := []sdCheckResult{}
for _, tgs := range sdCheckResults {
- results = append(results, getSDCheckResult(tgs, scrapeConfig, noDefaultScrapePort)...)
+ results = append(results, getSDCheckResult(tgs, scrapeConfig)...)
}
res, err := json.MarshalIndent(results, "", " ")
@@ -127,7 +127,7 @@ outerLoop:
return successExitCode
}
-func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig, noDefaultScrapePort bool) []sdCheckResult {
+func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig) []sdCheckResult {
sdCheckResults := []sdCheckResult{}
lb := labels.NewBuilder(labels.EmptyLabels())
for _, targetGroup := range targetGroups {
@@ -144,7 +144,7 @@ func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.Sc
}
}
- res, orig, err := scrape.PopulateLabels(lb, scrapeConfig, noDefaultScrapePort)
+ res, orig, err := scrape.PopulateLabels(lb, scrapeConfig)
result := sdCheckResult{
DiscoveredLabels: orig,
Labels: res,
diff --git a/cmd/promtool/sd_test.go b/cmd/promtool/sd_test.go
index cb65ee72a..44d808465 100644
--- a/cmd/promtool/sd_test.go
+++ b/cmd/promtool/sd_test.go
@@ -70,5 +70,5 @@ func TestSDCheckResult(t *testing.T) {
},
}
- testutil.RequireEqual(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig, true))
+ testutil.RequireEqual(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig))
}
diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md
index 32b7039bd..7737b5021 100644
--- a/docs/command-line/prometheus.md
+++ b/docs/command-line/prometheus.md
@@ -56,7 +56,7 @@ The Prometheus monitoring server
| --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
| --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
| --query.max-samples | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
-| --enable-feature ... | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
+| --enable-feature ... | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| --agent | Run Prometheus in 'Agent mode'. | |
| --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
| --log.format | Output format of log messages. One of: [logfmt, json] | `logfmt` |
diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md
index e48cede79..996a99655 100644
--- a/docs/command-line/promtool.md
+++ b/docs/command-line/promtool.md
@@ -15,7 +15,7 @@ Tooling for the Prometheus monitoring system.
| -h, --help | Show context-sensitive help (also try --help-long and --help-man). |
| --version | Show application version. |
| --experimental | Enable experimental commands. |
-| --enable-feature ... | Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details. |
+| --enable-feature ... | Comma separated feature names to enable. Currently unused. |
diff --git a/docs/feature_flags.md b/docs/feature_flags.md
index 911dde20e..1e9455a3f 100644
--- a/docs/feature_flags.md
+++ b/docs/feature_flags.md
@@ -71,15 +71,6 @@ When enabled, the GOMEMLIMIT variable is automatically set to match the Linux co
There is also an additional tuning flag, `--auto-gomemlimit.ratio`, which controls how much of the memory is used for Prometheus. The remainder is reserved for memory outside the process, such as the kernel page cache, which is important for Prometheus TSDB query performance. The default is `0.9`, which means 90% of the memory limit will be used for Prometheus.
-## No default scrape port
-
-`--enable-feature=no-default-scrape-port`
-
-When enabled, the default ports for HTTP (`:80`) or HTTPS (`:443`) will _not_ be added to
-the address used to scrape a target (the value of the `__address__` label), contrary to the default behavior.
-In addition, if a default HTTP or HTTPS port has already been added either in a static configuration or
-by a service discovery mechanism and the respective scheme is specified (`http` or `https`), that port will be removed.
-
## Native Histograms
`--enable-feature=native-histograms`
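Note for deployments that relied on the removed flag's port handling: after this patch, Prometheus neither appends `:80`/`:443` to portless addresses nor strips default ports, so `__address__` is scraped exactly as discovered. Below is a minimal sketch, not part of this patch, of restoring the old port-appending behavior with an explicit relabel rule via the `model/relabel` package; the regexp and replacement are illustrative.

```go
// Hypothetical relabel rule: re-append ":80" to addresses that carry no
// port at all. Relabel regexps are fully anchored, so `([^:]+)` matches
// only colon-free addresses and leaves ported targets and bracketed
// IPv6 hosts alone.
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"
)

func main() {
	rule := &relabel.Config{
		SourceLabels: model.LabelNames{model.AddressLabel},
		Regex:        relabel.MustNewRegexp(`([^:]+)`),
		TargetLabel:  model.AddressLabel,
		Replacement:  "${1}:80",
		Action:       relabel.Replace,
	}
	lset, _ := relabel.Process(labels.FromStrings(model.AddressLabel, "1.2.3.4"), rule)
	fmt.Println(lset.Get(model.AddressLabel)) // "1.2.3.4:80"
}
```

The equivalent `relabel_configs` entry in YAML scrape configuration behaves the same way.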
diff --git a/scrape/manager.go b/scrape/manager.go
index d7786a082..cbb881028 100644
--- a/scrape/manager.go
+++ b/scrape/manager.go
@@ -70,8 +70,7 @@ func NewManager(o *Options, logger log.Logger, newScrapeFailureLogger func(strin
// Options are the configuration parameters to the scrape manager.
type Options struct {
- ExtraMetrics bool
- NoDefaultPort bool
+ ExtraMetrics bool
// Option used by downstream scraper users like OpenTelemetry Collector
// to help lookup metric metadata. Should be false for Prometheus.
PassMetadataInContext bool
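For downstream users that embed the scrape manager (the Options comment above mentions the OpenTelemetry Collector), this is a breaking API change: code that sets `NoDefaultPort` stops compiling. A minimal sketch of a post-patch Options literal, with illustrative field values:

```go
// Sketch of the API change for downstream embedders. Code that
// previously set NoDefaultPort must drop the field; it no longer exists.
package main

import "github.com/prometheus/prometheus/scrape"

func main() {
	opts := &scrape.Options{
		ExtraMetrics: true,
		// PassMetadataInContext is for downstream scraper users such as
		// the OpenTelemetry Collector; it should be false for Prometheus.
	}
	_ = opts // handed to scrape.NewManager as before
}
```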
diff --git a/scrape/manager_test.go b/scrape/manager_test.go
index 13a369812..7e01238cc 100644
--- a/scrape/manager_test.go
+++ b/scrape/manager_test.go
@@ -54,12 +54,11 @@ func init() {
func TestPopulateLabels(t *testing.T) {
cases := []struct {
- in labels.Labels
- cfg *config.ScrapeConfig
- noDefaultPort bool
- res labels.Labels
- resOrig labels.Labels
- err string
+ in labels.Labels
+ cfg *config.ScrapeConfig
+ res labels.Labels
+ resOrig labels.Labels
+ err string
}{
// Regular population of scrape config options.
{
@@ -113,8 +112,8 @@ func TestPopulateLabels(t *testing.T) {
ScrapeTimeout: model.Duration(time.Second),
},
res: labels.FromMap(map[string]string{
- model.AddressLabel: "1.2.3.4:80",
- model.InstanceLabel: "1.2.3.4:80",
+ model.AddressLabel: "1.2.3.4",
+ model.InstanceLabel: "1.2.3.4",
model.SchemeLabel: "http",
model.MetricsPathLabel: "/custom",
model.JobLabel: "custom-job",
@@ -144,7 +143,7 @@ func TestPopulateLabels(t *testing.T) {
ScrapeTimeout: model.Duration(time.Second),
},
res: labels.FromMap(map[string]string{
- model.AddressLabel: "[::1]:443",
+ model.AddressLabel: "[::1]",
model.InstanceLabel: "custom-instance",
model.SchemeLabel: "https",
model.MetricsPathLabel: "/metrics",
@@ -367,7 +366,6 @@ func TestPopulateLabels(t *testing.T) {
ScrapeInterval: model.Duration(time.Second),
ScrapeTimeout: model.Duration(time.Second),
},
- noDefaultPort: true,
res: labels.FromMap(map[string]string{
model.AddressLabel: "1.2.3.4",
model.InstanceLabel: "1.2.3.4",
@@ -386,7 +384,7 @@ func TestPopulateLabels(t *testing.T) {
model.ScrapeTimeoutLabel: "1s",
}),
},
- // Remove default port (http).
+ // Verify that the default port is not removed (http).
{
in: labels.FromMap(map[string]string{
model.AddressLabel: "1.2.3.4:80",
@@ -398,9 +396,8 @@ func TestPopulateLabels(t *testing.T) {
ScrapeInterval: model.Duration(time.Second),
ScrapeTimeout: model.Duration(time.Second),
},
- noDefaultPort: true,
res: labels.FromMap(map[string]string{
- model.AddressLabel: "1.2.3.4",
+ model.AddressLabel: "1.2.3.4:80",
model.InstanceLabel: "1.2.3.4:80",
model.SchemeLabel: "http",
model.MetricsPathLabel: "/metrics",
@@ -417,7 +414,7 @@ func TestPopulateLabels(t *testing.T) {
model.ScrapeTimeoutLabel: "1s",
}),
},
- // Remove default port (https).
+ // Verify that the default port is not removed (https).
{
in: labels.FromMap(map[string]string{
model.AddressLabel: "1.2.3.4:443",
@@ -429,9 +426,8 @@ func TestPopulateLabels(t *testing.T) {
ScrapeInterval: model.Duration(time.Second),
ScrapeTimeout: model.Duration(time.Second),
},
- noDefaultPort: true,
res: labels.FromMap(map[string]string{
- model.AddressLabel: "1.2.3.4",
+ model.AddressLabel: "1.2.3.4:443",
model.InstanceLabel: "1.2.3.4:443",
model.SchemeLabel: "https",
model.MetricsPathLabel: "/metrics",
@@ -452,7 +448,7 @@ func TestPopulateLabels(t *testing.T) {
for _, c := range cases {
in := c.in.Copy()
- res, orig, err := PopulateLabels(labels.NewBuilder(c.in), c.cfg, c.noDefaultPort)
+ res, orig, err := PopulateLabels(labels.NewBuilder(c.in), c.cfg)
if c.err != "" {
require.EqualError(t, err, c.err)
} else {
diff --git a/scrape/scrape.go b/scrape/scrape.go
index f29beeb03..071edfca5 100644
--- a/scrape/scrape.go
+++ b/scrape/scrape.go
@@ -87,8 +87,6 @@ type scrapePool struct {
// Constructor for new scrape loops. This is settable for testing convenience.
newLoop func(scrapeLoopOptions) loop
- noDefaultPort bool
-
metrics *scrapeMetrics
scrapeFailureLogger log.Logger
@@ -149,7 +147,6 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
logger: logger,
metrics: metrics,
httpOpts: options.HTTPClientOptions,
- noDefaultPort: options.NoDefaultPort,
}
sp.newLoop = func(opts scrapeLoopOptions) loop {
// Update the targets retrieval function for metadata to a new scrape cache.
@@ -429,7 +426,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
sp.droppedTargets = []*Target{}
sp.droppedTargetsCount = 0
for _, tg := range tgs {
- targets, failures := TargetsFromGroup(tg, sp.config, sp.noDefaultPort, targets, lb)
+ targets, failures := TargetsFromGroup(tg, sp.config, targets, lb)
for _, err := range failures {
level.Error(sp.logger).Log("msg", "Creating target failed", "err", err)
}
diff --git a/scrape/target.go b/scrape/target.go
index 375439833..06d4737ff 100644
--- a/scrape/target.go
+++ b/scrape/target.go
@@ -17,7 +17,6 @@ import (
"errors"
"fmt"
"hash/fnv"
- "net"
"net/url"
"strings"
"sync"
@@ -424,7 +423,7 @@ func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels
// PopulateLabels builds a label set from the given label set and scrape configuration.
// It returns a label set before relabeling was applied as the second return value.
// If the target is dropped during relabeling, the original discovered label set is still returned.
-func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort bool) (res, orig labels.Labels, err error) {
+func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) {
// Copy labels into the labelset for the target if they are not set already.
scrapeLabels := []labels.Label{
{Name: model.JobLabel, Value: cfg.JobName},
@@ -457,51 +456,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort
return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("no address")
}
- // addPort checks whether we should add a default port to the address.
- // If the address is not valid, we don't append a port either.
- addPort := func(s string) (string, string, bool) {
- // If we can split, a port exists and we don't have to add one.
- if host, port, err := net.SplitHostPort(s); err == nil {
- return host, port, false
- }
- // If adding a port makes it valid, the previous error
- // was not due to an invalid address and we can append a port.
- _, _, err := net.SplitHostPort(s + ":1234")
- return "", "", err == nil
- }
-
addr := lb.Get(model.AddressLabel)
- scheme := lb.Get(model.SchemeLabel)
- host, port, add := addPort(addr)
- // If it's an address with no trailing port, infer it based on the used scheme
- // unless the no-default-scrape-port feature flag is present.
- if !noDefaultPort && add {
- // Addresses reaching this point are already wrapped in [] if necessary.
- switch scheme {
- case "http", "":
- addr += ":80"
- case "https":
- addr += ":443"
- default:
- return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("invalid scheme: %q", cfg.Scheme)
- }
- lb.Set(model.AddressLabel, addr)
- }
-
- if noDefaultPort {
- // If it's an address with a trailing default port and the
- // no-default-scrape-port flag is present, remove the port.
- switch port {
- case "80":
- if scheme == "http" {
- lb.Set(model.AddressLabel, host)
- }
- case "443":
- if scheme == "https" {
- lb.Set(model.AddressLabel, host)
- }
- }
- }
if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil {
return labels.EmptyLabels(), labels.EmptyLabels(), err
@@ -557,7 +512,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort
}
// TargetsFromGroup builds targets based on the given TargetGroup and config.
-func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefaultPort bool, targets []*Target, lb *labels.Builder) ([]*Target, []error) {
+func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, targets []*Target, lb *labels.Builder) ([]*Target, []error) {
targets = targets[:0]
failures := []error{}
@@ -573,7 +528,7 @@ func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefault
}
}
- lset, origLabels, err := PopulateLabels(lb, cfg, noDefaultPort)
+ lset, origLabels, err := PopulateLabels(lb, cfg)
if err != nil {
failures = append(failures, fmt.Errorf("instance %d in group %s: %w", i, tg, err))
}
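To make the behavioral change concrete, here is a sketch of the updated `PopulateLabels` call, using config values mirrored from the tests in this diff; nothing in it is added by the patch itself. With the port logic gone, a portless `__address__` passes through verbatim where it previously came back with the scheme's default port appended.

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/scrape"
)

func main() {
	cfg := &config.ScrapeConfig{
		JobName:        "job",
		Scheme:         "https",
		MetricsPath:    "/metrics",
		ScrapeInterval: model.Duration(time.Second),
		ScrapeTimeout:  model.Duration(time.Second),
	}
	lb := labels.NewBuilder(labels.FromStrings(model.AddressLabel, "1.2.3.4"))
	res, _, err := scrape.PopulateLabels(lb, cfg) // noDefaultPort argument removed
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Get(model.AddressLabel)) // "1.2.3.4", no longer "1.2.3.4:443"
}
```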
diff --git a/scrape/target_test.go b/scrape/target_test.go
index 84fe078b2..bd2795287 100644
--- a/scrape/target_test.go
+++ b/scrape/target_test.go
@@ -348,7 +348,7 @@ func TestTargetsFromGroup(t *testing.T) {
ScrapeInterval: model.Duration(1 * time.Minute),
}
lb := labels.NewBuilder(labels.EmptyLabels())
- targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &cfg, false, nil, lb)
+ targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &cfg, nil, lb)
require.Len(t, targets, 1)
require.Len(t, failures, 1)
require.EqualError(t, failures[0], expectedError)
@@ -435,7 +435,7 @@ scrape_configs:
lb := labels.NewBuilder(labels.EmptyLabels())
group := &targetgroup.Group{Targets: targets}
for i := 0; i < b.N; i++ {
- tgets, _ = TargetsFromGroup(group, config.ScrapeConfigs[0], false, tgets, lb)
+ tgets, _ = TargetsFromGroup(group, config.ScrapeConfigs[0], tgets, lb)
if len(tgets) != nTargets {
b.Fatalf("Expected %d targets, got %d", nTargets, len(tgets))
}
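And the matching `TargetsFromGroup` call after this patch, again a sketch with illustrative inputs; the `noDefaultPort` argument is simply gone and everything else is unchanged:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/scrape"
)

func main() {
	cfg := &config.ScrapeConfig{
		JobName:        "job",
		Scheme:         "http",
		MetricsPath:    "/metrics",
		ScrapeInterval: model.Duration(time.Minute),
		ScrapeTimeout:  model.Duration(10 * time.Second),
	}
	tg := &targetgroup.Group{Targets: []model.LabelSet{{model.AddressLabel: "localhost:9090"}}}
	lb := labels.NewBuilder(labels.EmptyLabels())
	targets, failures := scrape.TargetsFromGroup(tg, cfg, nil, lb) // noDefaultPort argument removed
	fmt.Println(len(targets), len(failures)) // 1 0
}
```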