Implement Docker discovery (#8629)

* Implement Docker discovery

Signed-off-by: Robert Jacob <xperimental@solidproject.de>
Robert Jacob 2021-03-29 22:30:23 +02:00 committed by GitHub
parent 9549a15c6f
commit b253056163
15 changed files with 756 additions and 23 deletions

View file

@@ -644,6 +644,27 @@ var expectedConf = &Config{
},
},
},
{
JobName: "docker",
HonorTimestamps: true,
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig,
ServiceDiscoveryConfigs: discovery.Configs{
&dockerswarm.DockerSDConfig{
Filters: []dockerswarm.Filter{},
Host: "unix:///var/run/docker.sock",
Port: 80,
RefreshInterval: model.Duration(60 * time.Second),
HTTPClientConfig: config.DefaultHTTPClientConfig,
},
},
},
{
JobName: "dockerswarm",

View file

@@ -268,6 +268,10 @@ scrape_configs:
- authorization:
credentials: abcdef
- job_name: docker
docker_sd_configs:
- host: unix:///var/run/docker.sock
- job_name: dockerswarm
dockerswarm_sd_configs:
- host: http://127.0.0.1:2375

View file

@@ -56,6 +56,9 @@ scrape_configs:
- authorization:
credentials: <secret>
docker_sd_configs:
- host: unix:///var/run/docker.sock
dockerswarm_sd_configs:
- host: http://127.0.0.1:2375
role: nodes

View file

@@ -0,0 +1,256 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dockerswarm
import (
"context"
"fmt"
"net"
"net/http"
"net/url"
"strconv"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/go-kit/kit/log"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/refresh"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/strutil"
)
const (
dockerLabel = model.MetaLabelPrefix + "docker_"
dockerLabelContainerPrefix = dockerLabel + "container_"
dockerLabelContainerID = dockerLabelContainerPrefix + "id"
dockerLabelContainerName = dockerLabelContainerPrefix + "name"
dockerLabelContainerNetworkMode = dockerLabelContainerPrefix + "network_mode"
dockerLabelContainerLabelPrefix = dockerLabelContainerPrefix + "label_"
dockerLabelNetworkPrefix = dockerLabel + "network_"
dockerLabelNetworkIP = dockerLabelNetworkPrefix + "ip"
dockerLabelPortPrefix = dockerLabel + "port_"
dockerLabelPortPrivate = dockerLabelPortPrefix + "private"
dockerLabelPortPublic = dockerLabelPortPrefix + "public"
dockerLabelPortPublicIP = dockerLabelPortPrefix + "public_ip"
)
// DefaultDockerSDConfig is the default Docker SD configuration.
var DefaultDockerSDConfig = DockerSDConfig{
RefreshInterval: model.Duration(60 * time.Second),
Port: 80,
Filters: []Filter{},
HTTPClientConfig: config.DefaultHTTPClientConfig,
}
func init() {
discovery.RegisterConfig(&DockerSDConfig{})
}
// DockerSDConfig is the configuration for Docker (non-swarm) based service discovery.
type DockerSDConfig struct {
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
Host string `yaml:"host"`
Port int `yaml:"port"`
Filters []Filter `yaml:"filters"`
RefreshInterval model.Duration `yaml:"refresh_interval"`
}
// Name returns the name of the Config.
func (*DockerSDConfig) Name() string { return "docker" }
// NewDiscoverer returns a Discoverer for the Config.
func (c *DockerSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return NewDockerDiscovery(c, opts.Logger)
}
// SetDirectory joins any relative file paths with dir.
func (c *DockerSDConfig) SetDirectory(dir string) {
c.HTTPClientConfig.SetDirectory(dir)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *DockerSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultDockerSDConfig
type plain DockerSDConfig
err := unmarshal((*plain)(c))
if err != nil {
return err
}
if c.Host == "" {
return fmt.Errorf("host missing")
}
if _, err = url.Parse(c.Host); err != nil {
return err
}
return c.HTTPClientConfig.Validate()
}
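// DockerDiscovery periodically queries the Docker Engine API for containers
// and converts them into target groups. It implements the Discoverer
// interface via the embedded refresh.Discovery.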
type DockerDiscovery struct {
*refresh.Discovery
client *client.Client
port int
filters filters.Args
}
// NewDockerDiscovery returns a new DockerDiscovery which periodically refreshes its targets.
func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger) (*DockerDiscovery, error) {
var err error
d := &DockerDiscovery{
port: conf.Port,
}
hostURL, err := url.Parse(conf.Host)
if err != nil {
return nil, err
}
opts := []client.Opt{
client.WithHost(conf.Host),
client.WithAPIVersionNegotiation(),
}
d.filters = filters.NewArgs()
for _, f := range conf.Filters {
for _, v := range f.Values {
d.filters.Add(f.Name, v)
}
}
// The Docker daemon can also be reached over transports other than HTTP,
// such as unix sockets, which the HTTP client does not support. Passing the
// HTTP client options to the Docker client would make those non-HTTP
// requests fail, so they are only applied for http and https hosts.
if hostURL.Scheme == "http" || hostURL.Scheme == "https" {
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "docker_sd", false, false)
if err != nil {
return nil, err
}
opts = append(opts,
client.WithHTTPClient(&http.Client{
Transport: rt,
Timeout: time.Duration(conf.RefreshInterval),
}),
client.WithScheme(hostURL.Scheme),
client.WithHTTPHeaders(map[string]string{
"User-Agent": userAgent,
}),
)
}
d.client, err = client.NewClientWithOpts(opts...)
if err != nil {
return nil, fmt.Errorf("error setting up docker client: %w", err)
}
d.Discovery = refresh.NewDiscovery(
logger,
"docker",
time.Duration(conf.RefreshInterval),
d.refresh,
)
return d, nil
}
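// refresh lists the containers reachable through the configured Docker host,
// attaches container, network, and port meta labels, and returns them as a
// single target group.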
func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
tg := &targetgroup.Group{
Source: "Docker",
}
containers, err := d.client.ContainerList(ctx, types.ContainerListOptions{Filters: d.filters})
if err != nil {
return nil, fmt.Errorf("error while listing containers: %w", err)
}
networkLabels, err := getNetworksLabels(ctx, d.client, dockerLabel)
if err != nil {
return nil, fmt.Errorf("error while computing network labels: %w", err)
}
for _, c := range containers {
if len(c.Names) == 0 {
continue
}
commonLabels := map[string]string{
dockerLabelContainerID: c.ID,
dockerLabelContainerName: c.Names[0],
dockerLabelContainerNetworkMode: c.HostConfig.NetworkMode,
}
for k, v := range c.Labels {
ln := strutil.SanitizeLabelName(k)
commonLabels[dockerLabelContainerLabelPrefix+ln] = v
}
for _, n := range c.NetworkSettings.Networks {
var added bool
for _, p := range c.Ports {
if p.Type != "tcp" {
continue
}
labels := model.LabelSet{
dockerLabelNetworkIP: model.LabelValue(n.IPAddress),
dockerLabelPortPrivate: model.LabelValue(strconv.FormatUint(uint64(p.PrivatePort), 10)),
}
if p.PublicPort > 0 {
labels[dockerLabelPortPublic] = model.LabelValue(strconv.FormatUint(uint64(p.PublicPort), 10))
labels[dockerLabelPortPublicIP] = model.LabelValue(p.IP)
}
for k, v := range commonLabels {
labels[model.LabelName(k)] = model.LabelValue(v)
}
for k, v := range networkLabels[n.NetworkID] {
labels[model.LabelName(k)] = model.LabelValue(v)
}
addr := net.JoinHostPort(n.IPAddress, strconv.FormatUint(uint64(p.PrivatePort), 10))
labels[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, labels)
added = true
}
if !added {
// Use the fallback port when the container exposes no ports or only non-TCP ports.
labels := model.LabelSet{
dockerLabelNetworkIP: model.LabelValue(n.IPAddress),
}
for k, v := range commonLabels {
labels[model.LabelName(k)] = model.LabelValue(v)
}
for k, v := range networkLabels[n.NetworkID] {
labels[model.LabelName(k)] = model.LabelValue(v)
}
addr := net.JoinHostPort(n.IPAddress, strconv.FormatUint(uint64(d.port), 10))
labels[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, labels)
}
}
}
return []*targetgroup.Group{tg}, nil
}
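
For orientation (not part of the change itself), a minimal sketch of how the `filters` option consumed above could look in `prometheus.yml`; the filter names follow the Docker Engine container-list API, and the `prometheus-job` label key is only an illustrative assumption:

```yaml
scrape_configs:
  - job_name: docker
    docker_sd_configs:
      - host: unix:///var/run/docker.sock
        refresh_interval: 30s
        # Each entry is forwarded to the Docker API as a filter argument
        # (see the filters.Args handling in NewDockerDiscovery above).
        filters:
          - name: status
            values: ["running"]
          - name: label
            values: ["prometheus-job"]  # assumed label key, for illustration only
```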

View file

@@ -0,0 +1,101 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dockerswarm
import (
"context"
"fmt"
"testing"
"github.com/go-kit/kit/log"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v2"
)
func TestDockerSDRefresh(t *testing.T) {
sdmock := NewSDMock(t, "dockerprom")
sdmock.Setup()
e := sdmock.Endpoint()
url := e[:len(e)-1]
cfgString := fmt.Sprintf(`
---
host: %s
`, url)
var cfg DockerSDConfig
require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg))
d, err := NewDockerDiscovery(&cfg, log.NewNopLogger())
require.NoError(t, err)
ctx := context.Background()
tgs, err := d.refresh(ctx)
require.NoError(t, err)
require.Equal(t, 1, len(tgs))
tg := tgs[0]
require.NotNil(t, tg)
require.NotNil(t, tg.Targets)
require.Equal(t, 2, len(tg.Targets))
for i, lbls := range []model.LabelSet{
{
"__address__": "172.19.0.2:9100",
"__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca",
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
"__meta_docker_container_label_com_docker_compose_service": "node",
"__meta_docker_container_label_com_docker_compose_version": "1.25.0",
"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
"__meta_docker_container_label_prometheus_job": "node",
"__meta_docker_container_name": "/dockersd_node_1",
"__meta_docker_container_network_mode": "dockersd_default",
"__meta_docker_network_id": "7189986ab399e144e52a71b7451b4e04e2158c044b4cd2f3ae26fc3a285d3798",
"__meta_docker_network_ingress": "false",
"__meta_docker_network_internal": "false",
"__meta_docker_network_ip": "172.19.0.2",
"__meta_docker_network_label_com_docker_compose_network": "default",
"__meta_docker_network_label_com_docker_compose_project": "dockersd",
"__meta_docker_network_label_com_docker_compose_version": "1.25.0",
"__meta_docker_network_name": "dockersd_default",
"__meta_docker_network_scope": "local",
"__meta_docker_port_private": "9100",
},
{
"__address__": "172.19.0.3:80",
"__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca",
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
"__meta_docker_container_label_com_docker_compose_service": "noport",
"__meta_docker_container_label_com_docker_compose_version": "1.25.0",
"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
"__meta_docker_container_label_prometheus_job": "noport",
"__meta_docker_container_name": "/dockersd_noport_1",
"__meta_docker_container_network_mode": "dockersd_default",
"__meta_docker_network_id": "7189986ab399e144e52a71b7451b4e04e2158c044b4cd2f3ae26fc3a285d3798",
"__meta_docker_network_ingress": "false",
"__meta_docker_network_internal": "false",
"__meta_docker_network_ip": "172.19.0.3",
"__meta_docker_network_label_com_docker_compose_network": "default",
"__meta_docker_network_label_com_docker_compose_project": "dockersd",
"__meta_docker_network_label_com_docker_compose_version": "1.25.0",
"__meta_docker_network_name": "dockersd_default",
"__meta_docker_network_scope": "local",
},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
require.Equal(t, lbls, tg.Targets[i])
})
}
}

View file

@@ -72,7 +72,7 @@ func (m *SDMock) SetupHandlers() {
prefix += "v" + v + "/"
}
for _, path := range []string{"_ping", "networks/", "services/", "nodes/", "nodes", "services", "tasks"} {
for _, path := range []string{"_ping", "networks/", "services/", "nodes/", "nodes", "services", "tasks", "containers/"} {
p := path
handler := prefix + p
if p == "_ping" {

View file

@@ -18,37 +18,37 @@ import (
"fmt"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/prometheus/prometheus/util/strutil"
)
const (
swarmLabelNetworkPrefix = swarmLabel + "network_"
swarmLabelNetworkID = swarmLabelNetworkPrefix + "id"
swarmLabelNetworkName = swarmLabelNetworkPrefix + "name"
swarmLabelNetworkScope = swarmLabelNetworkPrefix + "scope"
swarmLabelNetworkInternal = swarmLabelNetworkPrefix + "internal"
swarmLabelNetworkIngress = swarmLabelNetworkPrefix + "ingress"
swarmLabelNetworkLabelPrefix = swarmLabelNetworkPrefix + "label_"
labelNetworkPrefix = "network_"
labelNetworkID = labelNetworkPrefix + "id"
labelNetworkName = labelNetworkPrefix + "name"
labelNetworkScope = labelNetworkPrefix + "scope"
labelNetworkInternal = labelNetworkPrefix + "internal"
labelNetworkIngress = labelNetworkPrefix + "ingress"
labelNetworkLabelPrefix = labelNetworkPrefix + "label_"
)
func (d *Discovery) getNetworksLabels(ctx context.Context) (map[string]map[string]string, error) {
networks, err := d.client.NetworkList(ctx, types.NetworkListOptions{})
func getNetworksLabels(ctx context.Context, client *client.Client, labelPrefix string) (map[string]map[string]string, error) {
networks, err := client.NetworkList(ctx, types.NetworkListOptions{})
if err != nil {
return nil, err
}
labels := make(map[string]map[string]string, len(networks))
for _, network := range networks {
labels[network.ID] = map[string]string{
swarmLabelNetworkID: network.ID,
swarmLabelNetworkName: network.Name,
swarmLabelNetworkScope: network.Scope,
swarmLabelNetworkInternal: fmt.Sprintf("%t", network.Internal),
swarmLabelNetworkIngress: fmt.Sprintf("%t", network.Ingress),
labelPrefix + labelNetworkID: network.ID,
labelPrefix + labelNetworkName: network.Name,
labelPrefix + labelNetworkScope: network.Scope,
labelPrefix + labelNetworkInternal: fmt.Sprintf("%t", network.Internal),
labelPrefix + labelNetworkIngress: fmt.Sprintf("%t", network.Ingress),
}
for k, v := range network.Labels {
ln := strutil.SanitizeLabelName(k)
labels[network.ID][swarmLabelNetworkLabelPrefix+ln] = v
labels[network.ID][labelPrefix+labelNetworkLabelPrefix+ln] = v
}
}

View file

@@ -51,7 +51,7 @@ func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group,
return nil, fmt.Errorf("error while listing swarm services: %w", err)
}
networkLabels, err := d.getNetworksLabels(ctx)
networkLabels, err := getNetworksLabels(ctx, d.client, swarmLabel)
if err != nil {
return nil, fmt.Errorf("error while computing swarm network labels: %w", err)
}

View file

@@ -58,7 +58,7 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err
return nil, fmt.Errorf("error while computing nodes labels and ports: %w", err)
}
networkLabels, err := d.getNetworksLabels(ctx)
networkLabels, err := getNetworksLabels(ctx, d.client, swarmLabel)
if err != nil {
return nil, fmt.Errorf("error while computing swarm network labels: %w", err)
}

View file

@@ -0,0 +1 @@
OK

View file

@@ -0,0 +1,93 @@
[
{
"Id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca",
"Names": [
"/dockersd_node_1"
],
"Image": "prom/node-exporter:v1.1.2",
"ImageID": "sha256:c19ae228f0699185488b6a1c0debb9c6b79672181356ad455c9a7924a41a01bb",
"Command": "/bin/node_exporter",
"Created": 1616273136,
"Ports": [
{
"PrivatePort": 9100,
"Type": "tcp"
}
],
"Labels": {
"com.docker.compose.project": "dockersd",
"com.docker.compose.service": "node",
"com.docker.compose.version": "1.25.0",
"maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
"prometheus-job": "node"
},
"State": "running",
"Status": "Up 40 seconds",
"HostConfig": {
"NetworkMode": "dockersd_default"
},
"NetworkSettings": {
"Networks": {
"dockersd_default": {
"IPAMConfig": null,
"Links": null,
"Aliases": null,
"NetworkID": "7189986ab399e144e52a71b7451b4e04e2158c044b4cd2f3ae26fc3a285d3798",
"EndpointID": "423198ed568efa36699c5f13289c46230580a0ad6453d9e726f5e3cf1a911f77",
"Gateway": "172.19.0.1",
"IPAddress": "172.19.0.2",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:ac:13:00:02",
"DriverOpts": null
}
}
},
"Mounts": []
},
{
"Id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca",
"Names": [
"/dockersd_noport_1"
],
"Image": "prom/node-exporter:v1.1.2",
"ImageID": "sha256:c19ae228f0699185488b6a1c0debb9c6b79672181356ad455c9a7924a41a01bb",
"Command": "/bin/node_exporter",
"Created": 1616273136,
"Ports": [],
"Labels": {
"com.docker.compose.project": "dockersd",
"com.docker.compose.service": "noport",
"com.docker.compose.version": "1.25.0",
"maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
"prometheus-job": "noport"
},
"State": "running",
"Status": "Up 40 seconds",
"HostConfig": {
"NetworkMode": "dockersd_default"
},
"NetworkSettings": {
"Networks": {
"dockersd_default": {
"IPAMConfig": null,
"Links": null,
"Aliases": null,
"NetworkID": "7189986ab399e144e52a71b7451b4e04e2158c044b4cd2f3ae26fc3a285d3798",
"EndpointID": "423198ed568efa36699c5f13289c46230580a0ad6453d9e726f5e3cf1a911f77",
"Gateway": "172.19.0.1",
"IPAddress": "172.19.0.3",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:ac:13:00:03",
"DriverOpts": null
}
}
},
"Mounts": []
}
]

View file

@@ -0,0 +1,5 @@
Ostype: linux
Server: Docker/19.03.5-ce (linux)
Docker-Experimental: "false"
Api-Version: "1.40"
Cache-Control: no-cache, no-store, must-revalidate

View file

@@ -0,0 +1,115 @@
[
{
"Name": "host",
"Id": "50e3f3df9b82eb4a9912d730e431378e7f9e297ce5c524089bfea183201b20a6",
"Created": "2018-03-22T02:44:55.903299699+01:00",
"Scope": "local",
"Driver": "host",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": []
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {},
"Options": {},
"Labels": {}
},
{
"Name": "bridge",
"Id": "03c5e4fd4ca3b43384b4ee088369251071dc455b4e1783af8de5031e493a4920",
"Created": "2021-03-20T21:19:50.639982673+01:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "172.17.0.0/16",
"Gateway": "172.17.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {},
"Options": {
"com.docker.network.bridge.default_bridge": "true",
"com.docker.network.bridge.enable_icc": "true",
"com.docker.network.bridge.enable_ip_masquerade": "true",
"com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
"com.docker.network.bridge.name": "docker0",
"com.docker.network.driver.mtu": "1500"
},
"Labels": {}
},
{
"Name": "dockersd_default",
"Id": "7189986ab399e144e52a71b7451b4e04e2158c044b4cd2f3ae26fc3a285d3798",
"Created": "2021-03-20T21:45:36.392538652+01:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "172.19.0.0/16",
"Gateway": "172.19.0.1"
}
]
},
"Internal": false,
"Attachable": true,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {},
"Options": {},
"Labels": {
"com.docker.compose.network": "default",
"com.docker.compose.project": "dockersd",
"com.docker.compose.version": "1.25.0"
}
},
{
"Name": "none",
"Id": "4f77bf4a633f2ad042177e4d55852c85acc59e71bea90129908371a747624a7c",
"Created": "2018-03-22T02:44:55.859947227+01:00",
"Scope": "local",
"Driver": "null",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": []
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {},
"Options": {},
"Labels": {}
}
]

View file

@@ -202,6 +202,10 @@ consul_sd_configs:
digitalocean_sd_configs:
[ - <digitalocean_sd_config> ... ]
# List of Docker service discovery configurations.
docker_sd_configs:
[ - <docker_sd_config> ... ]
# List of Docker Swarm service discovery configurations.
dockerswarm_sd_configs:
[ - <dockerswarm_sd_config> ... ]
@@ -486,6 +490,94 @@ tls_config:
[ refresh_interval: <duration> | default = 60s ]
```
### `<docker_sd_config>`
Docker SD configurations allow retrieving scrape targets from [Docker Engine](https://docs.docker.com/engine/) hosts.
This SD discovers containers and creates a target for each network IP and each port the container is configured to expose. For containers without any exposed TCP ports, a single target per network is created using the configured default `port`.
Available meta labels:
* `__meta_docker_container_id`: the ID of the container
* `__meta_docker_container_name`: the name of the container
* `__meta_docker_container_network_mode`: the network mode of the container
* `__meta_docker_container_label_<labelname>`: each label of the container
* `__meta_docker_network_id`: the ID of the network
* `__meta_docker_network_name`: the name of the network
* `__meta_docker_network_ingress`: whether the network is ingress
* `__meta_docker_network_internal`: whether the network is internal
* `__meta_docker_network_label_<labelname>`: each label of the network
* `__meta_docker_network_scope`: the scope of the network
* `__meta_docker_network_ip`: the IP of the container in this network
* `__meta_docker_port_private`: the port on the container
* `__meta_docker_port_public`: the external port if a port-mapping exists
* `__meta_docker_port_public_ip`: the public IP if a port-mapping exists
See below for the configuration options for Docker discovery:
```yaml
# Address of the Docker daemon.
host: <string>
# Optional proxy URL.
[ proxy_url: <string> ]
# TLS configuration.
tls_config:
[ <tls_config> ]
# The port to scrape metrics from, for containers that don't have any
# exposed TCP ports.
[ port: <int> | default = 80 ]
# Optional filters to limit the discovery process to a subset of the
# available containers.
# The available filters are listed in the upstream documentation:
# Containers: https://docs.docker.com/engine/api/v1.40/#operation/ContainerList
[ filters:
  [ - name: <string>
      values: <string>, [...] ]
# The time after which the containers are refreshed.
[ refresh_interval: <duration> | default = 60s ]
# Authentication information used to authenticate to the Docker daemon.
# Note that `basic_auth` and `authorization` options are
# mutually exclusive.
# password and password_file are mutually exclusive.
# Optional HTTP basic authentication information.
basic_auth:
[ username: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
# Optional `Authorization` header configuration.
authorization:
# Sets the authentication type.
[ type: <string> | default: Bearer ]
# Sets the credentials. It is mutually exclusive with
# `credentials_file`.
[ credentials: <secret> ]
# Sets the credentials with the credentials read from the configured file.
# It is mutually exclusive with `credentials`.
[ credentials_file: <filename> ]
# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <bool> | default = true ]
```
The [relabeling phase](#relabel_config) is the preferred and more powerful
way to filter containers. For users with thousands of containers it can be
more efficient to use the Docker API directly, which has basic support for
filtering containers (using `filters`).
See [this example Prometheus configuration file](/documentation/examples/prometheus-docker.yml)
for a detailed example of configuring Prometheus for Docker Engine.
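
To make the comparison concrete, here is a small sketch of the relabel-based approach using the meta labels documented above; the `frontend` network name and the `container` target label are invented for illustration:

```yaml
scrape_configs:
  - job_name: docker
    docker_sd_configs:
      - host: unix:///var/run/docker.sock
    relabel_configs:
      # Keep only containers attached to a (hypothetical) "frontend" network.
      - source_labels: [__meta_docker_network_name]
        regex: frontend
        action: keep
      # Expose the container name (minus the leading slash) as a plain label.
      - source_labels: [__meta_docker_container_name]
        regex: /(.*)
        target_label: container
```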
### `<dockerswarm_sd_config>`
Docker Swarm SD configurations allow retrieving scrape targets from [Docker Swarm](https://docs.docker.com/engine/swarm/)
@@ -601,14 +693,12 @@ role: <string>
# Optional filters to limit the discovery process to a subset of available
# resources.
# The available filters are listed in the upstream documentation:
# Services: https://docs.docker.com/engine/api/v1.40/#operation/ServiceList
# Tasks: https://docs.docker.com/engine/api/v1.40/#operation/TaskList
# Nodes: https://docs.docker.com/engine/api/v1.40/#operation/NodeList
# https://docs.docker.com/engine/api/v1.40/#operation/ContainerList
[ filters:
  [ - name: <string>
      values: <string>, [...] ]
# The time after which the droplets are refreshed.
# The time after which the service discovery data is refreshed.
[ refresh_interval: <duration> | default = 60s ]
# Authentication information used to authenticate to the Docker daemon.
@@ -1826,6 +1916,10 @@ file_sd_configs:
digitalocean_sd_configs:
[ - <digitalocean_sd_config> ... ]
# List of Docker service discovery configurations.
docker_sd_configs:
[ - <docker_sd_config> ... ]
# List of Docker Swarm service discovery configurations.
dockerswarm_sd_configs:
[ - <dockerswarm_sd_config> ... ]

View file

@@ -0,0 +1,40 @@
# An example scrape configuration for running Prometheus with Docker.
scrape_configs:
# Make Prometheus scrape itself for metrics.
- job_name: 'prometheus'
static_configs:
- targets: ['localhost:9090']
# Create a job for the Docker daemon.
#
# This example requires the Docker daemon to be configured to expose
# Prometheus metrics, as documented here:
# https://docs.docker.com/config/daemon/prometheus/
- job_name: 'docker'
static_configs:
- targets: ['localhost:9323']
# Create a job for Docker containers.
#
# This example works with cadvisor running using:
# docker run --detach --name cadvisor -l prometheus-job=cadvisor
# --mount type=bind,src=/var/run/docker.sock,dst=/var/run/docker.sock,ro
# --mount type=bind,src=/,dst=/rootfs,ro
# --mount type=bind,src=/var/run,dst=/var/run
# --mount type=bind,src=/sys,dst=/sys,ro
# --mount type=bind,src=/var/lib/docker,dst=/var/lib/docker,ro
# google/cadvisor -docker_only
- job_name: 'docker-containers'
docker_sd_configs:
- host: unix:///var/run/docker.sock # You can also use http/https to connect to the Docker daemon.
relabel_configs:
# Only keep containers that have a `prometheus-job` label.
- source_labels: [__meta_docker_container_label_prometheus_job]
regex: .+
action: keep
# Use the container labels that are prefixed by `prometheus-`.
- regex: __meta_docker_container_label_prometheus_(.+)
action: labelmap
replacement: $1