diff --git a/CHANGELOG.md b/CHANGELOG.md index def883a4a..d1f5287c8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +## 1.1.1 / 2016-09-07 + +* [BUGFIX] Fix IPv6 escaping in service discovery integrations +* [BUGFIX] Fix default scrape port assignment for IPv6 + ## 1.1.0 / 2016-09-03 * [FEATURE] Add `quantile` and `quantile_over_time`. diff --git a/VERSION b/VERSION index 9084fa2f7..524cb5524 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.1.0 +1.1.1 diff --git a/retrieval/discovery/azure.go b/retrieval/discovery/azure.go index 629d21b2a..0f1334eb8 100644 --- a/retrieval/discovery/azure.go +++ b/retrieval/discovery/azure.go @@ -15,6 +15,7 @@ package discovery import ( "fmt" + "net" "strings" "time" @@ -217,7 +218,7 @@ func (ad *AzureDiscovery) refresh() (*config.TargetGroup, error) { for _, ip := range *networkInterface.Properties.IPConfigurations { if ip.Properties.PrivateIPAddress != nil { labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress) - address := fmt.Sprintf("%s:%d", *ip.Properties.PrivateIPAddress, ad.port) + address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, fmt.Sprintf("%d", ad.port)) labels[model.AddressLabel] = model.LabelValue(address) ch <- target{labelSet: labels, err: nil} return diff --git a/retrieval/discovery/consul/consul.go b/retrieval/discovery/consul/consul.go index e11d4acb5..28536b59c 100644 --- a/retrieval/discovery/consul/consul.go +++ b/retrieval/discovery/consul/consul.go @@ -15,6 +15,7 @@ package consul import ( "fmt" + "net" "strconv" "strings" "time" @@ -238,9 +239,9 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*config.TargetG // since the service may be registered remotely through a different node var addr string if node.ServiceAddress != "" { - addr = fmt.Sprintf("%s:%d", node.ServiceAddress, node.ServicePort) + addr = net.JoinHostPort(node.ServiceAddress, fmt.Sprintf("%d", node.ServicePort)) } else { - addr = fmt.Sprintf("%s:%d", node.Address, node.ServicePort) + addr = net.JoinHostPort(node.Address, fmt.Sprintf("%d", node.ServicePort)) } tgroup.Targets = append(tgroup.Targets, model.LabelSet{ diff --git a/retrieval/discovery/dns/dns.go b/retrieval/discovery/dns/dns.go index 432f1c042..960507a0c 100644 --- a/retrieval/discovery/dns/dns.go +++ b/retrieval/discovery/dns/dns.go @@ -133,6 +133,9 @@ func (dd *Discovery) refresh(ctx context.Context, name string, ch chan<- []*conf } tg := &config.TargetGroup{} + hostPort := func(a string, p int) model.LabelValue { + return model.LabelValue(net.JoinHostPort(a, fmt.Sprintf("%d", p))) + } for _, record := range response.Answer { target := model.LabelValue("") @@ -141,11 +144,11 @@ func (dd *Discovery) refresh(ctx context.Context, name string, ch chan<- []*conf // Remove the final dot from rooted DNS names to make them look more usual. 
addr.Target = strings.TrimRight(addr.Target, ".") - target = model.LabelValue(fmt.Sprintf("%s:%d", addr.Target, addr.Port)) + target = hostPort(addr.Target, int(addr.Port)) case *dns.A: - target = model.LabelValue(fmt.Sprintf("%s:%d", addr.A, dd.port)) + target = hostPort(addr.A.String(), dd.port) case *dns.AAAA: - target = model.LabelValue(fmt.Sprintf("%s:%d", addr.AAAA, dd.port)) + target = hostPort(addr.AAAA.String(), dd.port) default: log.Warnf("%q is not a valid SRV record", record) continue diff --git a/retrieval/discovery/ec2.go b/retrieval/discovery/ec2.go index e39016948..6d6e340ac 100644 --- a/retrieval/discovery/ec2.go +++ b/retrieval/discovery/ec2.go @@ -15,6 +15,7 @@ package discovery import ( "fmt" + "net" "strings" "time" @@ -112,7 +113,7 @@ func (ed *EC2Discovery) refresh() (*config.TargetGroup, error) { ec2LabelInstanceID: model.LabelValue(*inst.InstanceId), } labels[ec2LabelPrivateIP] = model.LabelValue(*inst.PrivateIpAddress) - addr := fmt.Sprintf("%s:%d", *inst.PrivateIpAddress, ed.port) + addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", ed.port)) labels[model.AddressLabel] = model.LabelValue(addr) if inst.PublicIpAddress != nil { diff --git a/retrieval/discovery/kubernetes/node.go b/retrieval/discovery/kubernetes/node.go index e64bc2eb1..26b173b65 100644 --- a/retrieval/discovery/kubernetes/node.go +++ b/retrieval/discovery/kubernetes/node.go @@ -92,7 +92,7 @@ func (d *nodeDiscovery) updateNodesTargetGroup() *config.TargetGroup { kubeletPort := int(node.Status.DaemonEndpoints.KubeletEndpoint.Port) - address := fmt.Sprintf("%s:%d", defaultNodeAddress.String(), kubeletPort) + address := net.JoinHostPort(defaultNodeAddress.String(), fmt.Sprintf("%d", kubeletPort)) t := model.LabelSet{ model.AddressLabel: model.LabelValue(address), diff --git a/retrieval/discovery/kubernetes/service.go b/retrieval/discovery/kubernetes/service.go index 3c0c6d04b..ac3bac33c 100644 --- a/retrieval/discovery/kubernetes/service.go +++ b/retrieval/discovery/kubernetes/service.go @@ -337,7 +337,7 @@ func (d *serviceDiscovery) updateServiceTargetGroup(service *Service, eps *Endpo if len(ipAddr) == net.IPv6len { ipAddr = "[" + ipAddr + "]" } - address := fmt.Sprintf("%s:%d", ipAddr, epPort) + address := net.JoinHostPort(ipAddr, fmt.Sprintf("%d", epPort)) t := model.LabelSet{ model.AddressLabel: model.LabelValue(address), diff --git a/retrieval/discovery/marathon/marathon.go b/retrieval/discovery/marathon/marathon.go index 4c94d6a8d..d6bf7b444 100644 --- a/retrieval/discovery/marathon/marathon.go +++ b/retrieval/discovery/marathon/marathon.go @@ -18,6 +18,7 @@ import ( "fmt" "io/ioutil" "math/rand" + "net" "net/http" "time" @@ -229,5 +230,5 @@ func targetsForApp(app *App) []model.LabelSet { } func targetForTask(task *Task) string { - return fmt.Sprintf("%s:%d", task.Host, task.Ports[0]) + return net.JoinHostPort(task.Host, fmt.Sprintf("%d", task.Ports[0])) } diff --git a/retrieval/discovery/nerve.go b/retrieval/discovery/nerve.go index 8422ce38b..ed9c13acf 100644 --- a/retrieval/discovery/nerve.go +++ b/retrieval/discovery/nerve.go @@ -16,6 +16,7 @@ package discovery import ( "encoding/json" "fmt" + "net" "sync" "time" @@ -133,7 +134,7 @@ func parseNerveMember(data []byte, path string) (*model.LabelSet, error) { labels := model.LabelSet{} labels[nervePathLabel] = model.LabelValue(path) labels[model.AddressLabel] = model.LabelValue( - fmt.Sprintf("%s:%d", member.Host, member.Port)) + net.JoinHostPort(member.Host, fmt.Sprintf("%d", member.Port))) 
labels[nerveEndpointLabelPrefix+"_host"] = model.LabelValue(member.Host) labels[nerveEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.Port)) diff --git a/retrieval/discovery/serverset.go b/retrieval/discovery/serverset.go index 48748518f..1b75c70da 100644 --- a/retrieval/discovery/serverset.go +++ b/retrieval/discovery/serverset.go @@ -16,6 +16,7 @@ package discovery import ( "encoding/json" "fmt" + "net" "strconv" "sync" "time" @@ -143,7 +144,7 @@ func parseServersetMember(data []byte, path string) (*model.LabelSet, error) { labels := model.LabelSet{} labels[serversetPathLabel] = model.LabelValue(path) labels[model.AddressLabel] = model.LabelValue( - fmt.Sprintf("%s:%d", member.ServiceEndpoint.Host, member.ServiceEndpoint.Port)) + net.JoinHostPort(member.ServiceEndpoint.Host, fmt.Sprintf("%d", member.ServiceEndpoint.Port))) labels[serversetEndpointLabelPrefix+"_host"] = model.LabelValue(member.ServiceEndpoint.Host) labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.ServiceEndpoint.Port)) diff --git a/retrieval/scrape.go b/retrieval/scrape.go index 74010fae8..cd91b1ba0 100644 --- a/retrieval/scrape.go +++ b/retrieval/scrape.go @@ -115,7 +115,7 @@ type scrapePool struct { } func newScrapePool(cfg *config.ScrapeConfig, app storage.SampleAppender) *scrapePool { - client, err := newHTTPClient(cfg) + client, err := NewHTTPClient(cfg) if err != nil { // Any errors that could occur here should be caught during config validation. log.Errorf("Error creating HTTP client for job %q: %s", cfg.JobName, err) @@ -167,7 +167,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) { sp.mtx.Lock() defer sp.mtx.Unlock() - client, err := newHTTPClient(cfg) + client, err := NewHTTPClient(cfg) if err != nil { // Any errors that could occur here should be caught during config validation. log.Errorf("Error creating HTTP client for job %q: %s", cfg.JobName, err) diff --git a/retrieval/target.go b/retrieval/target.go index 17cdcc3c9..10c04193f 100644 --- a/retrieval/target.go +++ b/retrieval/target.go @@ -66,7 +66,8 @@ func NewTarget(labels, metaLabels model.LabelSet, params url.Values) *Target { } } -func newHTTPClient(cfg *config.ScrapeConfig) (*http.Client, error) { +// NewHTTPClient returns a new HTTP client configured for the given scrape configuration. 
+func NewHTTPClient(cfg *config.ScrapeConfig) (*http.Client, error) { tlsOpts := httputil.TLSOptions{ InsecureSkipVerify: cfg.TLSConfig.InsecureSkipVerify, CAFile: cfg.TLSConfig.CAFile, diff --git a/retrieval/target_test.go b/retrieval/target_test.go index 0e01c1c53..d0d7018d6 100644 --- a/retrieval/target_test.go +++ b/retrieval/target_test.go @@ -155,7 +155,7 @@ func TestNewHTTPBearerToken(t *testing.T) { ScrapeTimeout: model.Duration(1 * time.Second), BearerToken: "1234", } - c, err := newHTTPClient(cfg) + c, err := NewHTTPClient(cfg) if err != nil { t.Fatal(err) } @@ -183,7 +183,7 @@ func TestNewHTTPBearerTokenFile(t *testing.T) { ScrapeTimeout: model.Duration(1 * time.Second), BearerTokenFile: "testdata/bearertoken.txt", } - c, err := newHTTPClient(cfg) + c, err := NewHTTPClient(cfg) if err != nil { t.Fatal(err) } @@ -213,7 +213,7 @@ func TestNewHTTPBasicAuth(t *testing.T) { Password: "password123", }, } - c, err := newHTTPClient(cfg) + c, err := NewHTTPClient(cfg) if err != nil { t.Fatal(err) } @@ -242,7 +242,7 @@ func TestNewHTTPCACert(t *testing.T) { CAFile: caCertPath, }, } - c, err := newHTTPClient(cfg) + c, err := NewHTTPClient(cfg) if err != nil { t.Fatal(err) } @@ -277,7 +277,7 @@ func TestNewHTTPClientCert(t *testing.T) { KeyFile: "testdata/client.key", }, } - c, err := newHTTPClient(cfg) + c, err := NewHTTPClient(cfg) if err != nil { t.Fatal(err) } @@ -307,7 +307,7 @@ func TestNewHTTPWithServerName(t *testing.T) { ServerName: "prometheus.rocks", }, } - c, err := newHTTPClient(cfg) + c, err := NewHTTPClient(cfg) if err != nil { t.Fatal(err) } @@ -337,7 +337,7 @@ func TestNewHTTPWithBadServerName(t *testing.T) { ServerName: "badname", }, } - c, err := newHTTPClient(cfg) + c, err := NewHTTPClient(cfg) if err != nil { t.Fatal(err) } @@ -377,7 +377,7 @@ func TestNewClientWithBadTLSConfig(t *testing.T) { KeyFile: "testdata/nonexistent_client.key", }, } - _, err := newHTTPClient(cfg) + _, err := NewHTTPClient(cfg) if err == nil { t.Fatalf("Expected error, got nil.") } diff --git a/retrieval/targetmanager.go b/retrieval/targetmanager.go index ca01cfdc9..1aa7a20f4 100644 --- a/retrieval/targetmanager.go +++ b/retrieval/targetmanager.go @@ -15,6 +15,7 @@ package retrieval import ( "fmt" + "net" "sort" "strings" "sync" @@ -417,79 +418,103 @@ func providersFromConfig(cfg *config.ScrapeConfig) map[string]TargetProvider { return providers } +// populateLabels builds a label set from the given label set and scrape configuration. +// It returns a label set before relabeling was applied as the second return value. +// Returns a nil label set if the target is dropped during relabeling. +func populateLabels(lset model.LabelSet, cfg *config.ScrapeConfig) (res, orig model.LabelSet, err error) { + if _, ok := lset[model.AddressLabel]; !ok { + return nil, nil, fmt.Errorf("no address") + } + // Copy labels into the labelset for the target if they are not + // set already. Apply the labelsets in order of decreasing precedence. + scrapeLabels := model.LabelSet{ + model.SchemeLabel: model.LabelValue(cfg.Scheme), + model.MetricsPathLabel: model.LabelValue(cfg.MetricsPath), + model.JobLabel: model.LabelValue(cfg.JobName), + } + for ln, lv := range scrapeLabels { + if _, ok := lset[ln]; !ok { + lset[ln] = lv + } + } + // Encode scrape query parameters as labels. + for k, v := range cfg.Params { + if len(v) > 0 { + lset[model.LabelName(model.ParamLabelPrefix+k)] = model.LabelValue(v[0]) + } + } + + preRelabelLabels := lset + lset = relabel.Process(lset, cfg.RelabelConfigs...) 
+ + // Check if the target was dropped. + if lset == nil { + return nil, nil, nil + } + + // addPort checks whether we should add a default port to the address. + // If the address is not valid, we don't append a port either. + addPort := func(s string) bool { + // If we can split, a port exists and we don't have to add one. + if _, _, err := net.SplitHostPort(s); err == nil { + return false + } + // If adding a port makes it valid, the previous error + // was not due to an invalid address and we can append a port. + _, _, err := net.SplitHostPort(s + ":1234") + return err == nil + } + // If it's an address with no trailing port, infer it based on the used scheme. + if addr := string(lset[model.AddressLabel]); addPort(addr) { + // Addresses reaching this point are already wrapped in [] if necessary. + switch lset[model.SchemeLabel] { + case "http", "": + addr = addr + ":80" + case "https": + addr = addr + ":443" + default: + return nil, nil, fmt.Errorf("invalid scheme: %q", cfg.Scheme) + } + lset[model.AddressLabel] = model.LabelValue(addr) + } + if err := config.CheckTargetAddress(lset[model.AddressLabel]); err != nil { + return nil, nil, err + } + + // Meta labels are deleted after relabelling. Other internal labels propagate to + // the target which decides whether they will be part of their label set. + for ln := range lset { + if strings.HasPrefix(string(ln), model.MetaLabelPrefix) { + delete(lset, ln) + } + } + + // Default the instance label to the target address. + if _, ok := lset[model.InstanceLabel]; !ok { + lset[model.InstanceLabel] = lset[model.AddressLabel] + } + return lset, preRelabelLabels, nil +} + // targetsFromGroup builds targets based on the given TargetGroup and config. -// Panics if target group is nil. func targetsFromGroup(tg *config.TargetGroup, cfg *config.ScrapeConfig) ([]*Target, error) { targets := make([]*Target, 0, len(tg.Targets)) - for i, labels := range tg.Targets { - for k, v := range cfg.Params { - if len(v) > 0 { - labels[model.LabelName(model.ParamLabelPrefix+k)] = model.LabelValue(v[0]) + for i, lset := range tg.Targets { + // Combine target labels with target group labels. + for ln, lv := range tg.Labels { + if _, ok := lset[ln]; !ok { + lset[ln] = lv } } - // Copy labels into the labelset for the target if they are not - // set already. Apply the labelsets in order of decreasing precedence. - labelsets := []model.LabelSet{ - tg.Labels, - { - model.SchemeLabel: model.LabelValue(cfg.Scheme), - model.MetricsPathLabel: model.LabelValue(cfg.MetricsPath), - model.JobLabel: model.LabelValue(cfg.JobName), - }, + labels, origLabels, err := populateLabels(lset, cfg) + if err != nil { + return nil, fmt.Errorf("instance %d in group %s: %s", i, tg, err) } - for _, lset := range labelsets { - for ln, lv := range lset { - if _, ok := labels[ln]; !ok { - labels[ln] = lv - } - } + if labels != nil { + targets = append(targets, NewTarget(labels, origLabels, cfg.Params)) } - - if _, ok := labels[model.AddressLabel]; !ok { - return nil, fmt.Errorf("instance %d in target group %s has no address", i, tg) - } - - preRelabelLabels := labels - - labels := relabel.Process(labels, cfg.RelabelConfigs...) - - // Check if the target was dropped. - if labels == nil { - continue - } - // If no port was provided, infer it based on the used scheme. 
- addr := string(labels[model.AddressLabel]) - if !strings.Contains(addr, ":") { - switch labels[model.SchemeLabel] { - case "http", "": - addr = fmt.Sprintf("%s:80", addr) - case "https": - addr = fmt.Sprintf("%s:443", addr) - default: - return nil, fmt.Errorf("invalid scheme: %q", cfg.Scheme) - } - labels[model.AddressLabel] = model.LabelValue(addr) - } - if err := config.CheckTargetAddress(labels[model.AddressLabel]); err != nil { - return nil, err - } - - for ln := range labels { - // Meta labels are deleted after relabelling. Other internal labels propagate to - // the target which decides whether they will be part of their label set. - if strings.HasPrefix(string(ln), model.MetaLabelPrefix) { - delete(labels, ln) - } - } - - if _, ok := labels[model.InstanceLabel]; !ok { - labels[model.InstanceLabel] = labels[model.AddressLabel] - } - - targets = append(targets, NewTarget(labels, preRelabelLabels, cfg.Params)) } - return targets, nil } diff --git a/retrieval/targetmanager_test.go b/retrieval/targetmanager_test.go index 72894a503..558e2671a 100644 --- a/retrieval/targetmanager_test.go +++ b/retrieval/targetmanager_test.go @@ -14,11 +14,13 @@ package retrieval import ( + "reflect" "testing" "golang.org/x/net/context" "gopkg.in/yaml.v2" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/storage/local" ) @@ -72,3 +74,135 @@ dns_sd_configs: verifyPresence(ts.tgroups, "dns/0/srv.name.one.example.org", false) verifyPresence(ts.tgroups, "dns/0/srv.name.two.example.org", true) } + +func mustNewRegexp(s string) config.Regexp { + re, err := config.NewRegexp(s) + if err != nil { + panic(err) + } + return re +} + +func TestPopulateLabels(t *testing.T) { + cases := []struct { + in model.LabelSet + cfg *config.ScrapeConfig + res model.LabelSet + resOrig model.LabelSet + }{ + // Regular population of scrape config options. + { + in: model.LabelSet{ + model.AddressLabel: "1.2.3.4:1000", + "custom": "value", + }, + cfg: &config.ScrapeConfig{ + Scheme: "https", + MetricsPath: "/metrics", + JobName: "job", + }, + res: model.LabelSet{ + model.AddressLabel: "1.2.3.4:1000", + model.InstanceLabel: "1.2.3.4:1000", + model.SchemeLabel: "https", + model.MetricsPathLabel: "/metrics", + model.JobLabel: "job", + "custom": "value", + }, + resOrig: model.LabelSet{ + model.AddressLabel: "1.2.3.4:1000", + model.SchemeLabel: "https", + model.MetricsPathLabel: "/metrics", + model.JobLabel: "job", + "custom": "value", + }, + }, + // Pre-define/overwrite scrape config labels. + // Leave out port and expect it to be defaulted to scheme. + { + in: model.LabelSet{ + model.AddressLabel: "1.2.3.4", + model.SchemeLabel: "http", + model.MetricsPathLabel: "/custom", + model.JobLabel: "custom-job", + }, + cfg: &config.ScrapeConfig{ + Scheme: "https", + MetricsPath: "/metrics", + JobName: "job", + }, + res: model.LabelSet{ + model.AddressLabel: "1.2.3.4:80", + model.InstanceLabel: "1.2.3.4:80", + model.SchemeLabel: "http", + model.MetricsPathLabel: "/custom", + model.JobLabel: "custom-job", + }, + resOrig: model.LabelSet{ + model.AddressLabel: "1.2.3.4", + model.SchemeLabel: "http", + model.MetricsPathLabel: "/custom", + model.JobLabel: "custom-job", + }, + }, + // Provide instance label. HTTPS port default for IPv6. 
+ { + in: model.LabelSet{ + model.AddressLabel: "[::1]", + model.InstanceLabel: "custom-instance", + }, + cfg: &config.ScrapeConfig{ + Scheme: "https", + MetricsPath: "/metrics", + JobName: "job", + }, + res: model.LabelSet{ + model.AddressLabel: "[::1]:443", + model.InstanceLabel: "custom-instance", + model.SchemeLabel: "https", + model.MetricsPathLabel: "/metrics", + model.JobLabel: "job", + }, + resOrig: model.LabelSet{ + model.AddressLabel: "[::1]", + model.InstanceLabel: "custom-instance", + model.SchemeLabel: "https", + model.MetricsPathLabel: "/metrics", + model.JobLabel: "job", + }, + }, + // Apply relabeling. + { + in: model.LabelSet{ + model.AddressLabel: "1.2.3.4:1000", + "custom": "value", + }, + cfg: &config.ScrapeConfig{ + Scheme: "https", + MetricsPath: "/metrics", + JobName: "job", + RelabelConfigs: []*config.RelabelConfig{ + { + Action: config.RelabelDrop, + Regex: mustNewRegexp(".*"), + SourceLabels: model.LabelNames{"job"}, + }, + }, + }, + res: nil, + resOrig: nil, + }, + } + for i, c := range cases { + res, orig, err := populateLabels(c.in, c.cfg) + if err != nil { + t.Fatalf("case %d: %s", i, err) + } + if !reflect.DeepEqual(res, c.res) { + t.Errorf("case %d: expected res\n\t%+v\n got\n\t%+v", i, c.res, res) + } + if !reflect.DeepEqual(orig, c.resOrig) { + t.Errorf("case %d: expected resOrig\n\t%+v\n got\n\t%+v", i, c.resOrig, orig) + } + } +}
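
Illustrative sketch (not part of the patch): the changes above lean on two stdlib behaviours — net.JoinHostPort brackets IPv6 literals where fmt.Sprintf("%s:%d", ...) did not, and net.SplitHostPort is what the new addPort closure in populateLabels uses to decide whether a default :80/:443 port should be appended. The standalone name needsDefaultPort below is made up for this sketch; the patch itself keeps that logic as a closure.

package main

import (
	"fmt"
	"net"
)

// needsDefaultPort mirrors the addPort heuristic from populateLabels (the name
// is hypothetical here): a default port is appended only if the address has no
// port and appending one would yield a valid host:port pair.
func needsDefaultPort(addr string) bool {
	// A successful split means a port is already present.
	if _, _, err := net.SplitHostPort(addr); err == nil {
		return false
	}
	// If adding a dummy port makes the address parse, the original error was
	// only the missing port, not a malformed address.
	_, _, err := net.SplitHostPort(addr + ":1234")
	return err == nil
}

func main() {
	// JoinHostPort brackets IPv6 hosts; the old fmt.Sprintf("%s:%d", ...) calls
	// produced unparseable addresses such as ::1:9100.
	fmt.Println(net.JoinHostPort("::1", "9100"))     // [::1]:9100
	fmt.Println(net.JoinHostPort("1.2.3.4", "9100")) // 1.2.3.4:9100

	// Default-port decision used when inferring the port from the scheme.
	fmt.Println(needsDefaultPort("[::1]"))      // true  -> [::1]:443 for https
	fmt.Println(needsDefaultPort("[::1]:9090")) // false, port already present
	fmt.Println(needsDefaultPort("1.2.3.4"))    // true  -> 1.2.3.4:80 for http
}

The old check, !strings.Contains(addr, ":"), treated a port-less IPv6 address as if it already carried a port, which is the second bugfix listed in the changelog.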