discovery/kubernetes: expose label_selector and field_selector
Close #6807

Co-authored-by: @shuttie
Signed-off-by: Aleksandra Gacek <algacek@google.com>

Parent: b4445ff03f · Commit: 8e53c19f9c
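For orientation: the change replaces the earlier map-keyed `selectors` block with a list of `{role, label, field}` entries. Below is a minimal sketch of how the new shape decodes, assuming `gopkg.in/yaml.v2` and using local stand-ins for the `SDConfig`/`SelectorConfig` types shown in the diff further down (not part of the commit):

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// Local stand-ins for the exported config types in the diff,
// trimmed to the selector-related fields.
type selectorConfig struct {
	Role  string `yaml:"role,omitempty"`
	Label string `yaml:"label,omitempty"`
	Field string `yaml:"field,omitempty"`
}

type sdConfig struct {
	Role      string           `yaml:"role"`
	Selectors []selectorConfig `yaml:"selectors,omitempty"`
}

func main() {
	raw := `
role: endpoints
selectors:
- role: "pod"
  label: "foo=bar"
  field: "metadata.status=Running"
`
	var c sdConfig
	if err := yaml.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	// Prints: {Role:endpoints Selectors:[{Role:pod Label:foo=bar Field:metadata.status=Running}]}
	fmt.Printf("%+v\n", c)
}
```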
config/config_test.go
@@ -806,7 +806,7 @@ var expectedErrors = []struct {
 		errMsg:   "role",
 	}, {
 		filename: "kubernetes_selectors_endpoints.bad.yml",
-		errMsg:   "endpoints role supports only pod, service and endpoints selectors",
+		errMsg:   "endpoints role supports only pod, service, endpoints selectors",
 	}, {
 		filename: "kubernetes_selectors_ingress.bad.yml",
 		errMsg:   "ingress role supports only ingress selectors",
@@ -822,6 +822,12 @@ var expectedErrors = []struct {
 	}, {
 		filename: "kubernetes_namespace_discovery.bad.yml",
 		errMsg:   "field foo not found in type kubernetes.plain",
 	}, {
+		filename: "kubernetes_selectors_duplicated_role.bad.yml",
+		errMsg:   "duplicated selector role: pod",
+	}, {
+		filename: "kubernetes_selectors_incorrect_selector.bad.yml",
+		errMsg:   "invalid selector: 'metadata.status-Running'; can't understand 'metadata.status-Running'",
+	}, {
 		filename: "kubernetes_bearertoken_basicauth.bad.yml",
 		errMsg:   "at most one of basic_auth, bearer_token & bearer_token_file must be configured",
config/testdata/kubernetes_selectors_duplicated_role.bad.yml (new file, 11 lines)
@@ -0,0 +1,11 @@
+scrape_configs:
+- job_name: prometheus
+  kubernetes_sd_configs:
+  - role: endpoints
+    selectors:
+    - role: "pod"
+      label: "foo=bar"
+      field: "metadata.status=Running"
+    - role: "pod"
+      label: "foo=bar"
+      field: "metadata.status=Running"
config/testdata/kubernetes_selectors_endpoints.bad.yml
@@ -3,12 +3,12 @@ scrape_configs:
   kubernetes_sd_configs:
   - role: endpoints
     selectors:
-      node:
-        label: "foo=bar"
-        field: "metadata.status=Running"
-      service:
-        label: "foo=bar"
-        field: "metadata.status=Running"
-      endpoints:
-        label: "foo=bar"
-        field: "metadata.status=Running"
+    - role: "node"
+      label: "foo=bar"
+      field: "metadata.status=Running"
+    - role: "service"
+      label: "foo=bar"
+      field: "metadata.status=Running"
+    - role: "endpoints"
+      label: "foo=bar"
+      field: "metadata.status=Running"
config/testdata/kubernetes_selectors_endpoints.good.yml
@@ -3,12 +3,12 @@ scrape_configs:
   kubernetes_sd_configs:
   - role: endpoints
     selectors:
-      pod:
-        label: "foo=bar"
-        field: "metadata.status=Running"
-      service:
-        label: "foo=bar"
-        field: "metadata.status=Running"
-      endpoints:
-        label: "foo=bar"
-        field: "metadata.status=Running"
+    - role: "pod"
+      label: "foo=bar"
+      field: "metadata.status=Running"
+    - role: "service"
+      label: "foo=bar"
+      field: "metadata.status=Running"
+    - role: "endpoints"
+      label: "foo=bar"
+      field: "metadata.status=Running"
config/testdata/kubernetes_selectors_incorrect_selector.bad.yml (new file, 7 lines)
@@ -0,0 +1,7 @@
+scrape_configs:
+- job_name: prometheus
+  kubernetes_sd_configs:
+  - role: endpoints
+    selectors:
+    - role: "pod"
+      field: "metadata.status-Running"
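This testdata exercises `fields.ParseSelector` from `k8s.io/apimachinery`, which the new validation (further down) runs over every `field` and `label` value. A quick standalone check of both the valid and the invalid selector from these files (an illustration, not part of the commit):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	// '=' is a recognised operator, so this parses cleanly.
	sel, err := fields.ParseSelector("metadata.status=Running")
	fmt.Println(sel, err) // metadata.status=Running <nil>

	// '-' is not an operator; this yields the error string asserted
	// in the config test above.
	_, err = fields.ParseSelector("metadata.status-Running")
	fmt.Println(err)
}
```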
config/testdata/kubernetes_selectors_ingress.bad.yml
@@ -3,6 +3,6 @@ scrape_configs:
   kubernetes_sd_configs:
   - role: ingress
     selectors:
-      node:
+      - role: "node"
        label: "foo=bar"
        field: "metadata.status=Running"
config/testdata/kubernetes_selectors_ingress.good.yml
@@ -3,6 +3,6 @@ scrape_configs:
   kubernetes_sd_configs:
   - role: ingress
     selectors:
-      ingress:
+      - role: "ingress"
        label: "foo=bar"
        field: "metadata.status=Running"
config/testdata/kubernetes_selectors_node.bad.yml
@@ -3,6 +3,6 @@ scrape_configs:
   kubernetes_sd_configs:
   - role: node
     selectors:
-      pod:
+      - role: "pod"
        label: "foo=bar"
        field: "metadata.status=Running"
config/testdata/kubernetes_selectors_node.good.yml
@@ -3,6 +3,6 @@ scrape_configs:
   kubernetes_sd_configs:
   - role: node
     selectors:
-      node:
+      - role: "node"
        label: "foo=bar"
        field: "metadata.status=Running"
config/testdata/kubernetes_selectors_pod.bad.yml
@@ -3,6 +3,6 @@ scrape_configs:
   kubernetes_sd_configs:
   - role: pod
     selectors:
-      node:
+      - role: "node"
        label: "foo=bar"
        field: "metadata.status=Running"
config/testdata/kubernetes_selectors_pod.good.yml
@@ -3,6 +3,6 @@ scrape_configs:
   kubernetes_sd_configs:
   - role: pod
     selectors:
-      pod:
+      - role: "pod"
        label: "foo=bar"
        field: "metadata.status=Running"
config/testdata/kubernetes_selectors_service.bad.yml
@@ -3,6 +3,6 @@ scrape_configs:
   kubernetes_sd_configs:
   - role: service
     selectors:
-      pod:
+      - role: "pod"
        label: "foo=bar"
        field: "metadata.status=Running"
config/testdata/kubernetes_selectors_service.good.yml
@@ -3,6 +3,6 @@ scrape_configs:
   kubernetes_sd_configs:
   - role: service
     selectors:
-      service:
+      - role: "service"
        label: "foo=bar"
        field: "metadata.status=Running"
discovery/kubernetes/kubernetes.go
@@ -16,6 +16,7 @@ package kubernetes
 import (
 	"context"
 	"reflect"
+	"strings"
 	"sync"
 	"time"
@@ -28,6 +29,7 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/kubernetes"
@@ -91,22 +93,28 @@ type SDConfig struct {
 	Role               Role                         `yaml:"role"`
 	HTTPClientConfig   config_util.HTTPClientConfig `yaml:",inline"`
 	NamespaceDiscovery NamespaceDiscovery           `yaml:"namespaces,omitempty"`
-	Selectors          RoleSelectorConfig           `yaml:"selectors,omitempty"`
+	Selectors          []SelectorConfig             `yaml:"selectors,omitempty"`
 }

-type RoleSelectorConfig struct {
-	Node      ResourceSelectorConfig `yaml:"node,omitempty"`
-	Pod       ResourceSelectorConfig `yaml:"pod,omitempty"`
-	Service   ResourceSelectorConfig `yaml:"service,omitempty"`
-	Endpoints ResourceSelectorConfig `yaml:"endpoints,omitempty"`
-	Ingress   ResourceSelectorConfig `yaml:"ingress,omitempty"`
+type roleSelector struct {
+	node      resourceSelector
+	pod       resourceSelector
+	service   resourceSelector
+	endpoints resourceSelector
+	ingress   resourceSelector
 }

-type ResourceSelectorConfig struct {
+type SelectorConfig struct {
+	Role  Role   `yaml:"role,omitempty"`
 	Label string `yaml:"label,omitempty"`
 	Field string `yaml:"field,omitempty"`
 }

+type resourceSelector struct {
+	label string
+	field string
+}
+
 // UnmarshalYAML implements the yaml.Unmarshaler interface.
 func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	*c = SDConfig{}
@@ -115,6 +123,9 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if err != nil {
 		return err
 	}
+	if c.Role == "" {
+		return errors.Errorf("role missing (one of: pod, service, endpoints, node, ingress)")
+	}
 	err = c.HTTPClientConfig.Validate()
 	if err != nil {
 		return err
@@ -122,29 +133,45 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config_util.HTTPClientConfig{}) {
 		return errors.Errorf("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
 	}
-	switch c.Role {
-	case "pod":
-		if len(c.Selectors.Service.Field) > 0 || len(c.Selectors.Endpoints.Field) > 0 || len(c.Selectors.Ingress.Field) > 0 || len(c.Selectors.Node.Field) > 0 {
-			return errors.Errorf("pod role supports only pod selectors")
-		}
-	case "service":
-		if len(c.Selectors.Pod.Field) > 0 || len(c.Selectors.Endpoints.Field) > 0 || len(c.Selectors.Ingress.Field) > 0 || len(c.Selectors.Node.Field) > 0 {
-			return errors.Errorf("service role supports only service selectors")
-		}
-	case "endpoints":
-		if len(c.Selectors.Ingress.Field) > 0 || len(c.Selectors.Node.Field) > 0 {
-			return errors.Errorf("endpoints role supports only pod, service and endpoints selectors")
-		}
-	case "node":
-		if len(c.Selectors.Service.Field) > 0 || len(c.Selectors.Endpoints.Field) > 0 || len(c.Selectors.Ingress.Field) > 0 || len(c.Selectors.Pod.Field) > 0 {
-			return errors.Errorf("node role supports only node selectors")
-		}
-	case "ingress":
-		if len(c.Selectors.Service.Field) > 0 || len(c.Selectors.Endpoints.Field) > 0 || len(c.Selectors.Node.Field) > 0 || len(c.Selectors.Pod.Field) > 0 {
-			return errors.Errorf("ingress role supports only ingress selectors")
-		}
-	default:
-		return errors.Errorf("role missing (one of: pod, service, endpoints, node, ingress)")
+
+	foundSelectorRoles := make(map[Role]struct{})
+	allowedSelectors := map[Role][]string{
+		RolePod:      {string(RolePod)},
+		RoleService:  {string(RoleService)},
+		RoleEndpoint: {string(RolePod), string(RoleService), string(RoleEndpoint)},
+		RoleNode:     {string(RoleNode)},
+		RoleIngress:  {string(RoleIngress)},
+	}
+
+	for _, selector := range c.Selectors {
+		if _, ok := foundSelectorRoles[selector.Role]; ok {
+			return errors.Errorf("duplicated selector role: %s", selector.Role)
+		}
+		foundSelectorRoles[selector.Role] = struct{}{}
+
+		if _, ok := allowedSelectors[c.Role]; !ok {
+			return errors.Errorf("invalid role: %q, expecting one of: pod, service, endpoints, node or ingress", c.Role)
+		}
+		var allowed bool
+		for _, role := range allowedSelectors[c.Role] {
+			if role == string(selector.Role) {
+				allowed = true
+				break
+			}
+		}
+
+		if !allowed {
+			return errors.Errorf("%s role supports only %s selectors", c.Role, strings.Join(allowedSelectors[c.Role], ", "))
+		}
+
+		_, err := fields.ParseSelector(selector.Field)
+		if err != nil {
+			return err
+		}
+		_, err = fields.ParseSelector(selector.Label)
+		if err != nil {
+			return err
+		}
 	}
 	return nil
 }
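The heart of the new `UnmarshalYAML` logic is the duplicate check plus the per-role allow-list. Here is a self-contained sketch of that flow (the `role` type and constants below are local stand-ins for the package's `Role` values, not the commit's code):

```go
package main

import (
	"fmt"
	"strings"
)

type role string

const (
	rolePod      role = "pod"
	roleService  role = "service"
	roleEndpoint role = "endpoints"
	roleNode     role = "node"
	roleIngress  role = "ingress"
)

// allowedSelectors mirrors the map in the diff: endpoints may filter on
// pod, service and endpoints objects; every other role only on itself.
var allowedSelectors = map[role][]string{
	rolePod:      {string(rolePod)},
	roleService:  {string(roleService)},
	roleEndpoint: {string(rolePod), string(roleService), string(roleEndpoint)},
	roleNode:     {string(roleNode)},
	roleIngress:  {string(roleIngress)},
}

func validateSelectors(cfgRole role, selectorRoles []role) error {
	seen := make(map[role]struct{})
	for _, sr := range selectorRoles {
		// Each selector role may appear at most once.
		if _, ok := seen[sr]; ok {
			return fmt.Errorf("duplicated selector role: %s", sr)
		}
		seen[sr] = struct{}{}

		// The selector role must be in the allow-list for the config role.
		var allowed bool
		for _, r := range allowedSelectors[cfgRole] {
			if r == string(sr) {
				allowed = true
				break
			}
		}
		if !allowed {
			return fmt.Errorf("%s role supports only %s selectors", cfgRole, strings.Join(allowedSelectors[cfgRole], ", "))
		}
	}
	return nil
}

func main() {
	fmt.Println(validateSelectors(roleEndpoint, []role{rolePod, roleService})) // <nil>
	fmt.Println(validateSelectors(roleNode, []role{rolePod}))                  // node role supports only node selectors
}
```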
@@ -196,7 +223,7 @@ type Discovery struct {
 	logger             log.Logger
 	namespaceDiscovery *NamespaceDiscovery
 	discoverers        []discoverer
-	selectors          RoleSelectorConfig
+	selectors          roleSelector
 }

 func (d *Discovery) getNamespaces() []string {
@@ -247,10 +274,34 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
 		role:               conf.Role,
 		namespaceDiscovery: &conf.NamespaceDiscovery,
 		discoverers:        make([]discoverer, 0),
-		selectors:          conf.Selectors,
+		selectors:          mapSelector(conf.Selectors),
 	}, nil
 }

+func mapSelector(rawSelector []SelectorConfig) roleSelector {
+	rs := roleSelector{}
+	for _, resourceSelectorRaw := range rawSelector {
+		switch resourceSelectorRaw.Role {
+		case RoleEndpoint:
+			rs.endpoints.field = resourceSelectorRaw.Field
+			rs.endpoints.label = resourceSelectorRaw.Label
+		case RoleIngress:
+			rs.ingress.field = resourceSelectorRaw.Field
+			rs.ingress.label = resourceSelectorRaw.Label
+		case RoleNode:
+			rs.node.field = resourceSelectorRaw.Field
+			rs.node.label = resourceSelectorRaw.Label
+		case RolePod:
+			rs.pod.field = resourceSelectorRaw.Field
+			rs.pod.label = resourceSelectorRaw.Label
+		case RoleService:
+			rs.service.field = resourceSelectorRaw.Field
+			rs.service.label = resourceSelectorRaw.Label
+		}
+	}
+	return rs
+}
+
 const resyncPeriod = 10 * time.Minute

 // Run implements the discoverer interface.
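`mapSelector` flattens the validated list back into a fixed per-role struct that `Run` reads. A toy restatement over local stand-in types (an illustration, not the commit's code):

```go
package main

import "fmt"

// Stand-ins for the unexported roleSelector/resourceSelector types.
type resourceSelector struct{ label, field string }

type roleSelector struct {
	node, pod, service, endpoints, ingress resourceSelector
}

type selectorConfig struct{ role, label, field string }

// mapSelector restated: the last entry per role wins, but UnmarshalYAML
// has already rejected duplicates, so each slot is written at most once.
func mapSelector(raw []selectorConfig) roleSelector {
	var rs roleSelector
	for _, s := range raw {
		switch s.role {
		case "node":
			rs.node = resourceSelector{label: s.label, field: s.field}
		case "pod":
			rs.pod = resourceSelector{label: s.label, field: s.field}
		case "service":
			rs.service = resourceSelector{label: s.label, field: s.field}
		case "endpoints":
			rs.endpoints = resourceSelector{label: s.label, field: s.field}
		case "ingress":
			rs.ingress = resourceSelector{label: s.label, field: s.field}
		}
	}
	return rs
}

func main() {
	rs := mapSelector([]selectorConfig{{role: "pod", label: "foo=bar", field: "metadata.status=Running"}})
	fmt.Printf("%+v\n", rs.pod) // {label:foo=bar field:metadata.status=Running}
}
```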
@@ -264,39 +315,39 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			e := d.client.CoreV1().Endpoints(namespace)
 			elw := &cache.ListWatch{
 				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
-					options.FieldSelector = d.selectors.Endpoints.Field
-					options.LabelSelector = d.selectors.Endpoints.Label
+					options.FieldSelector = d.selectors.endpoints.field
+					options.LabelSelector = d.selectors.endpoints.label
 					return e.List(options)
 				},
 				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-					options.FieldSelector = d.selectors.Endpoints.Field
-					options.LabelSelector = d.selectors.Endpoints.Label
+					options.FieldSelector = d.selectors.endpoints.field
+					options.LabelSelector = d.selectors.endpoints.label
 					return e.Watch(options)
 				},
 			}
 			s := d.client.CoreV1().Services(namespace)
 			slw := &cache.ListWatch{
 				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
-					options.FieldSelector = d.selectors.Service.Field
-					options.LabelSelector = d.selectors.Service.Label
+					options.FieldSelector = d.selectors.service.field
+					options.LabelSelector = d.selectors.service.label
 					return s.List(options)
 				},
 				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-					options.FieldSelector = d.selectors.Service.Field
-					options.LabelSelector = d.selectors.Service.Label
+					options.FieldSelector = d.selectors.service.field
+					options.LabelSelector = d.selectors.service.label
 					return s.Watch(options)
 				},
 			}
 			p := d.client.CoreV1().Pods(namespace)
 			plw := &cache.ListWatch{
 				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
-					options.FieldSelector = d.selectors.Pod.Field
-					options.LabelSelector = d.selectors.Pod.Label
+					options.FieldSelector = d.selectors.pod.field
+					options.LabelSelector = d.selectors.pod.label
 					return p.List(options)
 				},
 				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-					options.FieldSelector = d.selectors.Pod.Field
-					options.LabelSelector = d.selectors.Pod.Label
+					options.FieldSelector = d.selectors.pod.field
+					options.LabelSelector = d.selectors.pod.label
 					return p.Watch(options)
 				},
 			}
@@ -316,13 +367,13 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			p := d.client.CoreV1().Pods(namespace)
 			plw := &cache.ListWatch{
 				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
-					options.FieldSelector = d.selectors.Pod.Field
-					options.LabelSelector = d.selectors.Pod.Label
+					options.FieldSelector = d.selectors.pod.field
+					options.LabelSelector = d.selectors.pod.label
 					return p.List(options)
 				},
 				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-					options.FieldSelector = d.selectors.Pod.Field
-					options.LabelSelector = d.selectors.Pod.Label
+					options.FieldSelector = d.selectors.pod.field
+					options.LabelSelector = d.selectors.pod.label
 					return p.Watch(options)
 				},
 			}
@@ -338,13 +389,13 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			s := d.client.CoreV1().Services(namespace)
 			slw := &cache.ListWatch{
 				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
-					options.FieldSelector = d.selectors.Service.Field
-					options.LabelSelector = d.selectors.Service.Label
+					options.FieldSelector = d.selectors.service.field
+					options.LabelSelector = d.selectors.service.label
 					return s.List(options)
 				},
 				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-					options.FieldSelector = d.selectors.Service.Field
-					options.LabelSelector = d.selectors.Service.Label
+					options.FieldSelector = d.selectors.service.field
+					options.LabelSelector = d.selectors.service.label
 					return s.Watch(options)
 				},
 			}
@@ -360,13 +411,13 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			i := d.client.ExtensionsV1beta1().Ingresses(namespace)
 			ilw := &cache.ListWatch{
 				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
-					options.FieldSelector = d.selectors.Ingress.Field
-					options.LabelSelector = d.selectors.Ingress.Label
+					options.FieldSelector = d.selectors.ingress.field
+					options.LabelSelector = d.selectors.ingress.label
 					return i.List(options)
 				},
 				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-					options.FieldSelector = d.selectors.Ingress.Field
-					options.LabelSelector = d.selectors.Ingress.Label
+					options.FieldSelector = d.selectors.ingress.field
+					options.LabelSelector = d.selectors.ingress.label
 					return i.Watch(options)
 				},
 			}
@@ -380,13 +431,13 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 		case RoleNode:
 			nlw := &cache.ListWatch{
 				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
-					options.FieldSelector = d.selectors.Node.Field
-					options.LabelSelector = d.selectors.Node.Label
+					options.FieldSelector = d.selectors.node.field
+					options.LabelSelector = d.selectors.node.label
 					return d.client.CoreV1().Nodes().List(options)
 				},
 				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-					options.FieldSelector = d.selectors.Node.Field
-					options.LabelSelector = d.selectors.Node.Label
+					options.FieldSelector = d.selectors.node.field
+					options.LabelSelector = d.selectors.node.label
 					return d.client.CoreV1().Nodes().Watch(options)
 				},
 			}
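Every `ListFunc`/`WatchFunc` above follows the same pattern: stamp the role's selectors onto `metav1.ListOptions` before each call, so filtering happens server-side in the API server rather than in Prometheus. A stand-in sketch of that pattern (local types replace client-go's, to keep the example self-contained and hypothetical):

```go
package main

import "fmt"

// listOptions mimics the two metav1.ListOptions fields the diff sets.
type listOptions struct {
	FieldSelector string
	LabelSelector string
}

type resourceSelector struct{ label, field string }

// withSelectors decorates a list call so every invocation carries the
// configured selectors, as the ListFunc/WatchFunc closures do in Run.
func withSelectors(sel resourceSelector, list func(listOptions) string) func() string {
	return func() string {
		return list(listOptions{FieldSelector: sel.field, LabelSelector: sel.label})
	}
}

func main() {
	sel := resourceSelector{label: "foo=bar", field: "metadata.status=Running"}
	listPods := withSelectors(sel, func(o listOptions) string {
		return fmt.Sprintf("GET /api/v1/pods?fieldSelector=%s&labelSelector=%s", o.FieldSelector, o.LabelSelector)
	})
	fmt.Println(listPods())
}
```

This is also why the documentation hunk below warns about API load: each distinct selector combination gets its own LIST/WATCH stream instead of sharing a single list/watch across scrape configs.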
docs/configuration/configuration.md
@@ -860,10 +860,17 @@ namespaces:
 # and https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ to learn more about the possible
 # filters that can be used. Endpoints role supports pod, service and endpoints selectors, other roles
 # only support selectors matching the role itself (e.g. node role can only contain node selectors).
-selectors:
-  <role>:
+
+# Note: When making decision about using field/label selector make sure that this
+# is the best approach - it will prevent Prometheus from reusing single list/watch
+# for all scrape configs. This might result in a bigger load on the Kubernetes API,
+# because per each selector combination there will be additional LIST/WATCH. On the other hand,
+# if you just want to monitor small subset of pods in large cluster it's recommended to use selectors.
+# Decision, if selectors should be used or not depends on the particular situation.
+[ selectors:
+  [ - role: <role>
    [ label: <string> ]
-    [ field: <string> ]
+    [ field: <string> ] ]]
 ```

 Where `<role>` must be `endpoints`, `service`, `pod`, `node`, or