no-default-scrape-port feature flag (#9523)

* Add `no-default-scrape-port` flag

Signed-off-by: Levi Harrison <git@leviharrison.dev>

parent a0f7c31c26, commit d61459d826
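In short, this commit threads one boolean end to end: `--enable-feature=no-default-scrape-port` sets `cfg.scrape.NoDefaultPort` in the Prometheus server (and a local `noDefaultScrapePort` in promtool), which travels through `scrape.Options` and `newScrapePool` into `PopulateLabels`, where the scheme's default port is no longer appended to `__address__` and an already-present default port is stripped. A runnable sketch of the resulting behavior follows the documentation hunk below.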
cmd/prometheus/main.go

@@ -179,7 +179,7 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 			level.Info(logger).Log("msg", "Experimental memory snapshot on shutdown enabled")
 		case "extra-scrape-metrics":
 			c.scrape.ExtraMetrics = true
-			level.Info(logger).Log("msg", "Experimental additional scrape metrics")
+			level.Info(logger).Log("msg", "Experimental additional scrape metrics enabled")
 		case "new-service-discovery-manager":
 			c.enableNewSDManager = true
 			level.Info(logger).Log("msg", "Experimental service discovery manager")
@@ -192,6 +192,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 		case "auto-gomaxprocs":
 			c.enableAutoGOMAXPROCS = true
 			level.Info(logger).Log("msg", "Automatically set GOMAXPROCS to match Linux container CPU quota")
+		case "no-default-scrape-port":
+			c.scrape.NoDefaultPort = true
+			level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.")
 		case "":
 			continue
 		case "promql-at-modifier", "promql-negative-offset":
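With the new case in place, passing `--enable-feature=no-default-scrape-port` to the server simply flips `c.scrape.NoDefaultPort` and logs the fact, following the same pattern as the neighboring feature flags.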
@@ -391,7 +394,7 @@ func main() {
 	a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
 		Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)
 
-	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
 		Default("").StringsVar(&cfg.featureList)
 
 	promlogflag.AddFlags(a, &cfg.promlogConfig)
cmd/promtool/main.go

@@ -211,7 +211,7 @@ func main() {
 		"A list of one or more files containing recording rules to be backfilled. All recording rules listed in the files will be backfilled. Alerting rules are not evaluated.",
 	).Required().ExistingFiles()
 
-	featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings()
+	featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings()
 
 	parsedCmd := kingpin.MustParse(app.Parse(os.Args[1:]))
 
@@ -223,10 +223,13 @@ func main() {
 		p = &promqlPrinter{}
 	}
 
+	var noDefaultScrapePort bool
 	for _, f := range *featureList {
 		opts := strings.Split(f, ",")
 		for _, o := range opts {
 			switch o {
+			case "no-default-scrape-port":
+				noDefaultScrapePort = true
 			case "":
 				continue
 			case "promql-at-modifier", "promql-negative-offset":
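promtool accepts the same feature name in its own `--enable-feature` list, so the service-discovery check (`sdCheckCmd` below) can resolve targets the same way an identically configured server would.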
@@ -239,7 +242,7 @@ func main() {
 
 	switch parsedCmd {
 	case sdCheckCmd.FullCommand():
-		os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout))
+		os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, noDefaultScrapePort))
 
 	case checkConfigCmd.FullCommand():
 		os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...))
@@ -1217,7 +1220,7 @@ func checkTargetGroupsForAlertmanager(targetGroups []*targetgroup.Group, amcfg *
 
 func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *config.ScrapeConfig) error {
 	for _, tg := range targetGroups {
-		_, failures := scrape.TargetsFromGroup(tg, scfg)
+		_, failures := scrape.TargetsFromGroup(tg, scfg, false)
 		if len(failures) > 0 {
 			first := failures[0]
 			return first
cmd/promtool/sd.go

@@ -37,7 +37,7 @@ type sdCheckResult struct {
 }
 
 // CheckSD performs service discovery for the given job name and reports the results.
-func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration) int {
+func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefaultScrapePort bool) int {
 	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
 
 	cfg, err := config.LoadFile(sdConfigFiles, false, false, logger)
@@ -94,7 +94,7 @@ outerLoop:
 	}
 	results := []sdCheckResult{}
 	for _, tgs := range sdCheckResults {
-		results = append(results, getSDCheckResult(tgs, scrapeConfig)...)
+		results = append(results, getSDCheckResult(tgs, scrapeConfig, noDefaultScrapePort)...)
 	}
 
 	res, err := json.MarshalIndent(results, "", " ")
@@ -107,7 +107,7 @@ outerLoop:
 	return successExitCode
 }
 
-func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig) []sdCheckResult {
+func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig, noDefaultScrapePort bool) []sdCheckResult {
 	sdCheckResults := []sdCheckResult{}
 	for _, targetGroup := range targetGroups {
 		for _, target := range targetGroup.Targets {
@@ -124,7 +124,7 @@ func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.Sc
 		}
 
 		targetLabels := labels.New(labelSlice...)
-		res, orig, err := scrape.PopulateLabels(targetLabels, scrapeConfig)
+		res, orig, err := scrape.PopulateLabels(targetLabels, scrapeConfig, noDefaultScrapePort)
 		result := sdCheckResult{
 			DiscoveredLabels: orig,
 			Labels:           res,
cmd/promtool/sd_test.go

@@ -69,5 +69,5 @@ func TestSDCheckResult(t *testing.T) {
 		},
 	}
 
-	require.Equal(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig))
+	require.Equal(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig, true))
 }
docs/feature_flags.md

@@ -94,3 +94,12 @@ computed at all.
 `--enable-feature=auto-gomaxprocs`
 
 When enabled, GOMAXPROCS variable is automatically set to match Linux container CPU quota.
+
+## No default scrape port
+
+`--enable-feature=no-default-scrape-port`
+
+When enabled, the default ports for HTTP (`:80`) or HTTPS (`:443`) will _not_ be added to
+the address used to scrape a target (the value of the `__address__` label), contrary to the default behavior.
+In addition, if a default HTTP or HTTPS port has already been added either in a static configuration or
+by a service discovery mechanism and the respective scheme is specified (`http` or `https`), that port will be removed.
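To make the documented behavior concrete, here is a minimal, self-contained sketch. It is not the actual Prometheus code path (IPv6 bracket handling, for example, is omitted); `normalizeAddress` is a hypothetical helper that mirrors what `PopulateLabels` does to `__address__` once the flag is considered:

```go
package main

import (
	"fmt"
	"net"
)

// normalizeAddress: with the flag off, a portless address gets the scheme's
// default port appended; with the flag on, nothing is appended and a
// matching default port (:80 for http, :443 for https) is stripped.
func normalizeAddress(addr, scheme string, noDefaultPort bool) string {
	host, port, err := net.SplitHostPort(addr)
	if err != nil {
		// No port present.
		if noDefaultPort {
			return addr
		}
		if scheme == "https" {
			return addr + ":443"
		}
		return addr + ":80" // "http" or empty scheme
	}
	// A port is present: strip it only if the flag is on and the port is
	// the default for the configured scheme.
	if noDefaultPort && ((port == "80" && scheme == "http") || (port == "443" && scheme == "https")) {
		return host
	}
	return addr
}

func main() {
	fmt.Println(normalizeAddress("1.2.3.4", "https", false))   // 1.2.3.4:443
	fmt.Println(normalizeAddress("1.2.3.4", "https", true))    // 1.2.3.4
	fmt.Println(normalizeAddress("1.2.3.4:80", "http", true))  // 1.2.3.4
	fmt.Println(normalizeAddress("1.2.3.4:80", "https", true)) // 1.2.3.4:80 (scheme mismatch, kept)
}
```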
scrape/manager.go

@@ -124,7 +124,8 @@ func NewManager(o *Options, logger log.Logger, app storage.Appendable) *Manager
 
 // Options are the configuration parameters to the scrape manager.
 type Options struct {
-	ExtraMetrics bool
+	ExtraMetrics  bool
+	NoDefaultPort bool
 	// Option used by downstream scraper users like OpenTelemetry Collector
 	// to help lookup metric metadata. Should be false for Prometheus.
 	PassMetadataInContext bool
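For downstream users of the scrape package, enabling the behavior becomes a matter of setting the new field when building the manager. A hedged sketch, assuming the go-kit logger import path used by Prometheus at the time; `buildManager` and its appendable argument are placeholders, and `NewManager`'s signature is the one shown in the hunk context above:

```go
package main

import (
	"github.com/go-kit/log"
	"github.com/prometheus/prometheus/scrape"
	"github.com/prometheus/prometheus/storage"
)

// buildManager shows the new knob in context; app is whatever Appendable
// the caller already has (e.g. the TSDB's storage).
func buildManager(app storage.Appendable) *scrape.Manager {
	opts := &scrape.Options{
		NoDefaultPort: true, // the field added in this commit
	}
	return scrape.NewManager(opts, log.NewNopLogger(), app)
}

func main() {}
```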
@@ -207,7 +208,7 @@ func (m *Manager) reload() {
 				level.Error(m.logger).Log("msg", "error reloading target set", "err", "invalid config id:"+setName)
 				continue
 			}
-			sp, err := newScrapePool(scrapeConfig, m.append, m.jitterSeed, log.With(m.logger, "scrape_pool", setName), m.opts.ExtraMetrics, m.opts.PassMetadataInContext, m.opts.HTTPClientOptions)
+			sp, err := newScrapePool(scrapeConfig, m.append, m.jitterSeed, log.With(m.logger, "scrape_pool", setName), m.opts)
 			if err != nil {
 				level.Error(m.logger).Log("msg", "error creating new scrape pool", "err", err, "scrape_pool", setName)
 				continue
scrape/manager_test.go

@@ -31,11 +31,12 @@ import (
 
 func TestPopulateLabels(t *testing.T) {
 	cases := []struct {
-		in      labels.Labels
-		cfg     *config.ScrapeConfig
-		res     labels.Labels
-		resOrig labels.Labels
-		err     string
+		in            labels.Labels
+		cfg           *config.ScrapeConfig
+		noDefaultPort bool
+		res           labels.Labels
+		resOrig       labels.Labels
+		err           string
 	}{
 		// Regular population of scrape config options.
 		{
@@ -331,11 +332,104 @@ func TestPopulateLabels(t *testing.T) {
 			resOrig: nil,
 			err:     "scrape timeout cannot be greater than scrape interval (\"2s\" > \"1s\")",
 		},
+		// Don't attach default port.
+		{
+			in: labels.FromMap(map[string]string{
+				model.AddressLabel: "1.2.3.4",
+			}),
+			cfg: &config.ScrapeConfig{
+				Scheme:         "https",
+				MetricsPath:    "/metrics",
+				JobName:        "job",
+				ScrapeInterval: model.Duration(time.Second),
+				ScrapeTimeout:  model.Duration(time.Second),
+			},
+			noDefaultPort: true,
+			res: labels.FromMap(map[string]string{
+				model.AddressLabel:        "1.2.3.4",
+				model.InstanceLabel:       "1.2.3.4",
+				model.SchemeLabel:         "https",
+				model.MetricsPathLabel:    "/metrics",
+				model.JobLabel:            "job",
+				model.ScrapeIntervalLabel: "1s",
+				model.ScrapeTimeoutLabel:  "1s",
+			}),
+			resOrig: labels.FromMap(map[string]string{
+				model.AddressLabel:        "1.2.3.4",
+				model.SchemeLabel:         "https",
+				model.MetricsPathLabel:    "/metrics",
+				model.JobLabel:            "job",
+				model.ScrapeIntervalLabel: "1s",
+				model.ScrapeTimeoutLabel:  "1s",
+			}),
+		},
+		// Remove default port (http).
+		{
+			in: labels.FromMap(map[string]string{
+				model.AddressLabel: "1.2.3.4:80",
+			}),
+			cfg: &config.ScrapeConfig{
+				Scheme:         "http",
+				MetricsPath:    "/metrics",
+				JobName:        "job",
+				ScrapeInterval: model.Duration(time.Second),
+				ScrapeTimeout:  model.Duration(time.Second),
+			},
+			noDefaultPort: true,
+			res: labels.FromMap(map[string]string{
+				model.AddressLabel:        "1.2.3.4",
+				model.InstanceLabel:       "1.2.3.4:80",
+				model.SchemeLabel:         "http",
+				model.MetricsPathLabel:    "/metrics",
+				model.JobLabel:            "job",
+				model.ScrapeIntervalLabel: "1s",
+				model.ScrapeTimeoutLabel:  "1s",
+			}),
+			resOrig: labels.FromMap(map[string]string{
+				model.AddressLabel:        "1.2.3.4:80",
+				model.SchemeLabel:         "http",
+				model.MetricsPathLabel:    "/metrics",
+				model.JobLabel:            "job",
+				model.ScrapeIntervalLabel: "1s",
+				model.ScrapeTimeoutLabel:  "1s",
+			}),
+		},
+		// Remove default port (https).
+		{
+			in: labels.FromMap(map[string]string{
+				model.AddressLabel: "1.2.3.4:443",
+			}),
+			cfg: &config.ScrapeConfig{
+				Scheme:         "https",
+				MetricsPath:    "/metrics",
+				JobName:        "job",
+				ScrapeInterval: model.Duration(time.Second),
+				ScrapeTimeout:  model.Duration(time.Second),
+			},
+			noDefaultPort: true,
+			res: labels.FromMap(map[string]string{
+				model.AddressLabel:        "1.2.3.4",
+				model.InstanceLabel:       "1.2.3.4:443",
+				model.SchemeLabel:         "https",
+				model.MetricsPathLabel:    "/metrics",
+				model.JobLabel:            "job",
+				model.ScrapeIntervalLabel: "1s",
+				model.ScrapeTimeoutLabel:  "1s",
+			}),
+			resOrig: labels.FromMap(map[string]string{
+				model.AddressLabel:        "1.2.3.4:443",
+				model.SchemeLabel:         "https",
+				model.MetricsPathLabel:    "/metrics",
+				model.JobLabel:            "job",
+				model.ScrapeIntervalLabel: "1s",
+				model.ScrapeTimeoutLabel:  "1s",
+			}),
+		},
 	}
 	for _, c := range cases {
 		in := c.in.Copy()
 
-		res, orig, err := PopulateLabels(c.in, c.cfg)
+		res, orig, err := PopulateLabels(c.in, c.cfg, c.noDefaultPort)
 		if c.err != "" {
 			require.EqualError(t, err, c.err)
 		} else {
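The three added cases pin down the flag's semantics: a portless address is left untouched, and an explicit `:80` (scheme `http`) or `:443` (scheme `https`) is stripped from `__address__`, while the `instance` label keeps the address exactly as discovered.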
scrape/scrape.go

@@ -239,6 +239,8 @@ type scrapePool struct {
 
 	// Constructor for new scrape loops. This is settable for testing convenience.
 	newLoop func(scrapeLoopOptions) loop
+
+	noDefaultPort bool
 }
 
 type labelLimits struct {
@@ -264,13 +266,13 @@ const maxAheadTime = 10 * time.Minute
 
 type labelsMutator func(labels.Labels) labels.Labels
 
-func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, reportExtraMetrics, passMetadataInContext bool, httpOpts []config_util.HTTPClientOption) (*scrapePool, error) {
+func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, options *Options) (*scrapePool, error) {
 	targetScrapePools.Inc()
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
 
-	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, httpOpts...)
+	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, options.HTTPClientOptions...)
 	if err != nil {
 		targetScrapePoolsFailed.Inc()
 		return nil, errors.Wrap(err, "error creating HTTP client")
@@ -287,7 +289,8 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
 		activeTargets: map[uint64]*Target{},
 		loops:         map[uint64]loop{},
 		logger:        logger,
-		httpOpts:      httpOpts,
+		httpOpts:      options.HTTPClientOptions,
+		noDefaultPort: options.NoDefaultPort,
 	}
 	sp.newLoop = func(opts scrapeLoopOptions) loop {
 		// Update the targets retrieval function for metadata to a new scrape cache.
@@ -314,10 +317,10 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
 			opts.labelLimits,
 			opts.interval,
 			opts.timeout,
-			reportExtraMetrics,
+			options.ExtraMetrics,
 			opts.target,
 			cache,
-			passMetadataInContext,
+			options.PassMetadataInContext,
 		)
 	}
 
@@ -480,7 +483,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
 	var all []*Target
 	sp.droppedTargets = []*Target{}
 	for _, tg := range tgs {
-		targets, failures := TargetsFromGroup(tg, sp.config)
+		targets, failures := TargetsFromGroup(tg, sp.config, sp.noDefaultPort)
 		for _, err := range failures {
 			level.Error(sp.logger).Log("msg", "Creating target failed", "err", err)
 		}
scrape/scrape_test.go

@@ -56,7 +56,7 @@ func TestNewScrapePool(t *testing.T) {
 	var (
 		app   = &nopAppendable{}
 		cfg   = &config.ScrapeConfig{}
-		sp, _ = newScrapePool(cfg, app, 0, nil, false, false, nil)
+		sp, _ = newScrapePool(cfg, app, 0, nil, &Options{})
 	)
 
 	if a, ok := sp.appendable.(*nopAppendable); !ok || a != app {
@@ -91,7 +91,7 @@ func TestDroppedTargetsList(t *testing.T) {
 				},
 			},
 		}
-		sp, _                  = newScrapePool(cfg, app, 0, nil, false, false, nil)
+		sp, _                  = newScrapePool(cfg, app, 0, nil, &Options{})
 		expectedLabelSetString = "{__address__=\"127.0.0.1:9090\", __scrape_interval__=\"0s\", __scrape_timeout__=\"0s\", job=\"dropMe\"}"
 		expectedLength         = 1
 	)
@@ -488,7 +488,7 @@ func TestScrapePoolTargetLimit(t *testing.T) {
 func TestScrapePoolAppender(t *testing.T) {
 	cfg := &config.ScrapeConfig{}
 	app := &nopAppendable{}
-	sp, _ := newScrapePool(cfg, app, 0, nil, false, false, nil)
+	sp, _ := newScrapePool(cfg, app, 0, nil, &Options{})
 
 	loop := sp.newLoop(scrapeLoopOptions{
 		target: &Target{},
@@ -530,7 +530,7 @@ func TestScrapePoolRaces(t *testing.T) {
 	newConfig := func() *config.ScrapeConfig {
 		return &config.ScrapeConfig{ScrapeInterval: interval, ScrapeTimeout: timeout}
 	}
-	sp, _ := newScrapePool(newConfig(), &nopAppendable{}, 0, nil, false, false, nil)
+	sp, _ := newScrapePool(newConfig(), &nopAppendable{}, 0, nil, &Options{})
 	tgts := []*targetgroup.Group{
 		{
 			Targets: []model.LabelSet{
@@ -2626,7 +2626,7 @@ func TestReuseScrapeCache(t *testing.T) {
 			ScrapeInterval: model.Duration(5 * time.Second),
 			MetricsPath:    "/metrics",
 		}
-		sp, _ = newScrapePool(cfg, app, 0, nil, false, false, nil)
+		sp, _ = newScrapePool(cfg, app, 0, nil, &Options{})
 		t1    = &Target{
 			discoveredLabels: labels.Labels{
 				labels.Label{
@@ -2839,7 +2839,7 @@ func TestReuseCacheRace(t *testing.T) {
 			ScrapeInterval: model.Duration(5 * time.Second),
 			MetricsPath:    "/metrics",
 		}
-		sp, _ = newScrapePool(cfg, app, 0, nil, false, false, nil)
+		sp, _ = newScrapePool(cfg, app, 0, nil, &Options{})
 		t1    = &Target{
 			discoveredLabels: labels.Labels{
 				labels.Label{
@@ -2971,7 +2971,7 @@ func TestScrapeReportLimit(t *testing.T) {
 	}))
 	defer ts.Close()
 
-	sp, err := newScrapePool(cfg, s, 0, nil, false, false, nil)
+	sp, err := newScrapePool(cfg, s, 0, nil, &Options{})
 	require.NoError(t, err)
 	defer sp.stop()
 
@@ -3141,7 +3141,7 @@ func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) {
 			},
 		},
 	}
-	sp, _ := newScrapePool(config, &nopAppendable{}, 0, nil, false, false, nil)
+	sp, _ := newScrapePool(config, &nopAppendable{}, 0, nil, &Options{})
 	tgts := []*targetgroup.Group{
 		{
 			Targets: []model.LabelSet{{model.AddressLabel: "127.0.0.1:9090"}},
scrape/target.go

@@ -351,7 +351,7 @@ func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels,
 // PopulateLabels builds a label set from the given label set and scrape configuration.
 // It returns a label set before relabeling was applied as the second return value.
 // Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling.
-func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) {
+func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig, noDefaultPort bool) (res, orig labels.Labels, err error) {
 	// Copy labels into the labelset for the target if they are not set already.
 	scrapeLabels := []labels.Label{
 		{Name: model.JobLabel, Value: cfg.JobName},
|
|||
|
||||
// addPort checks whether we should add a default port to the address.
|
||||
// If the address is not valid, we don't append a port either.
|
||||
addPort := func(s string) bool {
|
||||
addPort := func(s string) (string, string, bool) {
|
||||
// If we can split, a port exists and we don't have to add one.
|
||||
if _, _, err := net.SplitHostPort(s); err == nil {
|
||||
return false
|
||||
if host, port, err := net.SplitHostPort(s); err == nil {
|
||||
return host, port, false
|
||||
}
|
||||
// If adding a port makes it valid, the previous error
|
||||
// was not due to an invalid address and we can append a port.
|
||||
_, _, err := net.SplitHostPort(s + ":1234")
|
||||
return err == nil
|
||||
return "", "", err == nil
|
||||
}
|
||||
|
||||
addr := lset.Get(model.AddressLabel)
|
||||
// If it's an address with no trailing port, infer it based on the used scheme.
|
||||
if addPort(addr) {
|
||||
scheme := lset.Get(model.SchemeLabel)
|
||||
host, port, add := addPort(addr)
|
||||
// If it's an address with no trailing port, infer it based on the used scheme
|
||||
// unless the no-default-scrape-port feature flag is present.
|
||||
if !noDefaultPort && add {
|
||||
// Addresses reaching this point are already wrapped in [] if necessary.
|
||||
switch lset.Get(model.SchemeLabel) {
|
||||
switch scheme {
|
||||
case "http", "":
|
||||
addr = addr + ":80"
|
||||
case "https":
|
||||
|
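The rewritten closure now reports the split result instead of just a boolean: a successful `net.SplitHostPort` means a port is already present (returned as `host`/`port`), while an address that only parses after appending `:1234` is valid but portless. A small probe of the two cases, using nothing beyond the standard library:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Case 1: a port is present, SplitHostPort succeeds.
	host, port, err := net.SplitHostPort("1.2.3.4:443")
	fmt.Println(host, port, err) // 1.2.3.4 443 <nil>

	// Case 2: no port, the first split fails...
	_, _, err = net.SplitHostPort("1.2.3.4")
	fmt.Println(err != nil) // true
	// ...but appending a dummy port makes it parse, so a default
	// port may safely be attached to the original address.
	_, _, err = net.SplitHostPort("1.2.3.4" + ":1234")
	fmt.Println(err == nil) // true
}
```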
@@ -414,6 +418,21 @@ func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig lab
 		lb.Set(model.AddressLabel, addr)
 	}
 
+	if noDefaultPort {
+		// If it's an address with a trailing default port and the
+		// no-default-scrape-port flag is present, remove the port.
+		switch port {
+		case "80":
+			if scheme == "http" {
+				lb.Set(model.AddressLabel, host)
+			}
+		case "443":
+			if scheme == "https" {
+				lb.Set(model.AddressLabel, host)
+			}
+		}
+	}
+
 	if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil {
 		return nil, nil, err
 	}
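Note the asymmetry between the two branches: the append step runs only when the flag is off, and the strip step only when it is on, and only when port and scheme agree, so a target discovered as `1.2.3.4:443` but scraped over `http` keeps its explicit port.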
@@ -464,7 +483,7 @@ func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig lab
 }
 
 // TargetsFromGroup builds targets based on the given TargetGroup and config.
-func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig) ([]*Target, []error) {
+func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefaultPort bool) ([]*Target, []error) {
 	targets := make([]*Target, 0, len(tg.Targets))
 	failures := []error{}
 
@@ -482,7 +501,7 @@ func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig) ([]*Targe
 
 		lset := labels.New(lbls...)
 
-		lbls, origLabels, err := PopulateLabels(lset, cfg)
+		lbls, origLabels, err := PopulateLabels(lset, cfg, noDefaultPort)
 		if err != nil {
 			failures = append(failures, errors.Wrapf(err, "instance %d in group %s", i, tg))
 		}
scrape/target_test.go

@@ -375,7 +375,7 @@ func TestTargetsFromGroup(t *testing.T) {
 		ScrapeTimeout:  model.Duration(10 * time.Second),
 		ScrapeInterval: model.Duration(1 * time.Minute),
 	}
-	targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &cfg)
+	targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &cfg, false)
 	if len(targets) != 1 {
 		t.Fatalf("Expected 1 target, got %v", len(targets))
 	}