commit 3f686cad8b (parent 46153083db)
Author: Michal Wasilewski <mwasilewski@gmx.com>
Date:   2021-06-12 12:47:47 +02:00
GPG key ID: 9C9AAD795F94409B

fixes yamllint errors

Signed-off-by: Michal Wasilewski <mwasilewski@gmx.com>

102 changed files with 1353 additions and 1367 deletions
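The churn below is mechanical: single quotes become double quotes, spaces inside flow-sequence brackets are dropped, commas in flow sequences gain a following space, block sequences are indented under their parent keys, stray blank lines disappear, and .gitpod.yml gains an explicit document-start marker. The repository's .yamllint configuration is not part of this diff, so the following is only a minimal sketch of a config that would flag these patterns; the rule names are real yamllint rules, but the option values here are assumptions:

    ---
    # Hypothetical .yamllint sketch; the project's real config may differ.
    extends: default
    rules:
      document-start:
        present: true           # requires the leading "---" added to .gitpod.yml
      quoted-strings:
        quote-type: double      # 'single' quotes are flagged, "double" quotes pass
        required: false         # existing quotes are checked, none are forced
      brackets:
        max-spaces-inside: 0    # [ main ] -> [main]
      commas:
        min-spaces-after: 1     # [a,b] -> [a, b]
      indentation:
        spaces: 2
        indent-sequences: true  # list items sit two spaces under their parent key
      empty-lines:
        max-end: 0              # no blank lines at the end of a file

yamllint only reports problems, it does not rewrite files: with a config like this, running yamllint . from the repository root lists the offending lines, and the edits below bring the files into compliance by hand.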

View file

@@ -69,8 +69,7 @@ jobs:
           command: |
             choco upgrade -y golang
       - run:
-          command:
-            refreshenv
+          command: refreshenv
       - run:
           command: |
             $env:GOARCH=""; $env:GOOS=""; cd web/ui; go generate

View file

@@ -13,12 +13,12 @@ name: "CodeQL"
 on:
   push:
-    branches: [ main, release-* ]
+    branches: [main, release-*]
   pull_request:
     # The branches below must be a subset of the branches above
-    branches: [ main ]
+    branches: [main]
   schedule:
-    - cron: '26 14 * * 1'
+    - cron: "26 14 * * 1"

 jobs:
   analyze:
@@ -28,7 +28,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        language: [ 'go', 'javascript' ]
+        language: ["go", "javascript"]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
        # Learn more:
        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed

View file

@@ -32,13 +32,13 @@ jobs:
       - name: Prepare nodepool
         uses: docker://prominfra/funcbench:master
         with:
-          entrypoint: 'docker_entrypoint'
+          entrypoint: "docker_entrypoint"
           args: make deploy
       - name: Delete all resources
         if: always()
         uses: docker://prominfra/funcbench:master
         with:
-          entrypoint: 'docker_entrypoint'
+          entrypoint: "docker_entrypoint"
           args: make clean
       - name: Update status to failure
         if: failure()

View file

@@ -2,9 +2,9 @@ name: CIFuzz
 on:
   pull_request:
     paths:
-      - 'go.sum'
-      - 'go.mod'
-      - '**.go'
+      - "go.sum"
+      - "go.mod"
+      - "**.go"
 jobs:
   Fuzzing:
     runs-on: ubuntu-latest
@@ -13,12 +13,12 @@ jobs:
         id: build
         uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master
         with:
-          oss-fuzz-project-name: 'prometheus'
+          oss-fuzz-project-name: "prometheus"
           dry-run: false
       - name: Run Fuzzers
         uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master
         with:
-          oss-fuzz-project-name: 'prometheus'
+          oss-fuzz-project-name: "prometheus"
           fuzz-seconds: 600
           dry-run: false
       - name: Upload Crash

View file

@@ -1,6 +1,6 @@
 on:
   repository_dispatch:
-    types: [prombench_start,prombench_restart,prombench_stop]
+    types: [prombench_start, prombench_restart, prombench_stop]
 name: Prombench Workflow
 env:
   AUTH_FILE: ${{ secrets.TEST_INFRA_PROVIDER_AUTH }}

View file

@@ -1,3 +1,4 @@
+---
 tasks:
   - init:
       make build
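This one-line addition is the document-start rule at work: with present: true (as assumed in the sketch above), yamllint requires every YAML file to open with the explicit --- document marker, which is the only change this file needs.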

View file

@@ -8,13 +8,13 @@ tests:
   - interval: 1m
     input_series:
       - series: test_full
-        values: '0 0'
+        values: "0 0"
       - series: test_stale
-        values: '0 stale'
+        values: "0 stale"
       - series: test_missing
-        values: '0 _ _ _ _ _ _ 0'
+        values: "0 _ _ _ _ _ _ 0"
     promql_expr_test:
       # Ensure the sample is evaluated at the time we expect it to be.
@@ -36,7 +36,7 @@ tests:
         eval_time: 59s
         exp_samples:
           - value: 0
-            labels: 'test_stale'
+            labels: "test_stale"
       - expr: test_stale
         eval_time: 1m
         exp_samples: []
@@ -120,7 +120,7 @@ tests:
       - series: 'test{job="test", instance="x:0"}'
         # 2 minutes + 1 second of input data, recording rules should only run
         # once a minute.
-        values: '0+1x120'
+        values: "0+1x120"
     promql_expr_test:
       - expr: job:test:count_over_time1m
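The file above is a rule unit-test fixture, so the reformatting has to leave the test semantics untouched. Such fixtures can be re-run with promtool, which ships with Prometheus; a usage sketch, assuming the fixture is saved as rules_test.yml next to the rule files it references (both names hypothetical here):

    promtool test rules rules_test.yml

promtool exits non-zero when an exp_samples assertion fails, so the same command doubles as a check that the quoting changes did not alter any test data.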

View file

@@ -1,4 +1,4 @@
 scrape_configs:
-- azure_sd_configs:
-  - authentication_method: invalid
-    subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11
+  - azure_sd_configs:
+      - authentication_method: invalid
+        subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11

View file

@@ -3,4 +3,3 @@ scrape_configs:
     bearer_token: 1234
     bearer_token_file: somefile
-

View file

@@ -5,4 +5,3 @@ scrape_configs:
     basic_auth:
       username: user
       password: password
-

View file

@@ -9,8 +9,7 @@ global:
   foo: bar

 rule_files:
   - "first.rules"
   - "my/*.rules"
-
 remote_write:
   - url: http://remote1/push
@@ -46,7 +46,7 @@ remote_read:
     key_file: valid_key_file

 scrape_configs:
   - job_name: prometheus
     honor_labels: true
     # scrape_interval is defined by the configured global (15s).
@@ -65,7 +65,7 @@ scrape_configs:
       - bar/*.yaml
     static_configs:
-      - targets: ['localhost:9090', 'localhost:9191']
+      - targets: ["localhost:9090", "localhost:9191"]
         labels:
           my: label
           your: label
@@ -87,8 +87,7 @@ scrape_configs:
     authorization:
       credentials_file: valid_token_file

-
   - job_name: service-x
     basic_auth:
       username: admin_name
@@ -135,12 +134,12 @@ scrape_configs:
         regex: expensive_metric.*
         action: drop

   - job_name: service-y
     consul_sd_configs:
-      - server: 'localhost:1234'
+      - server: "localhost:1234"
         token: mysecret
-        services: ['nginx', 'cache', 'mysql']
+        services: ["nginx", "cache", "mysql"]
         tags: ["canary", "v1"]
         node_meta:
           rack: "123"
@@ -154,12 +153,12 @@ scrape_configs:
     relabel_configs:
       - source_labels: [__meta_sd_consul_tags]
-        separator: ','
+        separator: ","
         regex: label:([^=]+)=([^,]+)
         target_label: ${1}
         replacement: ${2}

   - job_name: service-z
     tls_config:
       cert_file: valid_cert_file
@@ -168,44 +167,43 @@ scrape_configs:
     authorization:
       credentials: mysecret

   - job_name: service-kubernetes
     kubernetes_sd_configs:
       - role: endpoints
-        api_server: 'https://localhost:1234'
+        api_server: "https://localhost:1234"
         tls_config:
           cert_file: valid_cert_file
           key_file: valid_key_file
         basic_auth:
-          username: 'myusername'
-          password: 'mysecret'
+          username: "myusername"
+          password: "mysecret"

   - job_name: service-kubernetes-namespaces
     kubernetes_sd_configs:
       - role: endpoints
-        api_server: 'https://localhost:1234'
+        api_server: "https://localhost:1234"
         namespaces:
           names:
             - default
         basic_auth:
-          username: 'myusername'
+          username: "myusername"
           password_file: valid_password_file

-
   - job_name: service-marathon
     marathon_sd_configs:
       - servers:
-          - 'https://marathon.example.com:443'
+          - "https://marathon.example.com:443"
         auth_token: "mysecret"
         tls_config:
           cert_file: valid_cert_file
           key_file: valid_key_file

   - job_name: service-ec2
     ec2_sd_configs:
       - region: us-east-1
         access_key: access
@@ -221,14 +219,14 @@ scrape_configs:
             - web
             - db

   - job_name: service-lightsail
     lightsail_sd_configs:
       - region: us-east-1
         access_key: access
         secret_key: mysecret
         profile: profile

   - job_name: service-azure
     azure_sd_configs:
       - environment: AzurePublicCloud
         authentication_method: OAuth
@@ -238,41 +236,41 @@ scrape_configs:
         client_secret: mysecret
         port: 9100

   - job_name: service-nerve
     nerve_sd_configs:
       - servers:
           - localhost
         paths:
           - /monitoring

   - job_name: 0123service-xxx
     metrics_path: /metrics
     static_configs:
       - targets:
           - localhost:9090

   - job_name: badfederation
     honor_timestamps: false
     metrics_path: /federate
     static_configs:
       - targets:
           - localhost:9090

   - job_name: 測試
     metrics_path: /metrics
     static_configs:
       - targets:
           - localhost:9090

   - job_name: httpsd
     http_sd_configs:
-      - url: 'http://example.com/prometheus'
+      - url: "http://example.com/prometheus"

   - job_name: service-triton
     triton_sd_configs:
-      - account: 'testAccount'
-        dns_suffix: 'triton.example.com'
-        endpoint: 'triton.example.com'
+      - account: "testAccount"
+        dns_suffix: "triton.example.com"
+        endpoint: "triton.example.com"
         port: 9163
         refresh_interval: 1m
         version: 1
@@ -280,21 +278,21 @@ scrape_configs:
         cert_file: valid_cert_file
         key_file: valid_key_file

   - job_name: digitalocean-droplets
     digitalocean_sd_configs:
       - authorization:
           credentials: abcdef

   - job_name: docker
     docker_sd_configs:
       - host: unix:///var/run/docker.sock

   - job_name: dockerswarm
     dockerswarm_sd_configs:
       - host: http://127.0.0.1:2375
         role: nodes

   - job_name: service-openstack
     openstack_sd_configs:
       - role: instance
         region: RegionOne
@@ -305,7 +303,7 @@ scrape_configs:
         cert_file: valid_cert_file
         key_file: valid_key_file

   - job_name: hetzner
     hetzner_sd_configs:
       - role: hcloud
         authorization:
@@ -315,11 +313,11 @@ scrape_configs:
           username: abcdef
           password: abcdef

   - job_name: service-eureka
     eureka_sd_configs:
-      - server: 'http://eureka.example.com:8761/eureka'
+      - server: "http://eureka.example.com:8761/eureka"

   - job_name: scaleway
     scaleway_sd_configs:
       - role: instance
         project_id: 11111111-1111-1111-1111-111111111112
@@ -330,7 +328,7 @@ scrape_configs:
         access_key: SCWXXXXXXXXXXXXXXXXX
         secret_key: 11111111-1111-1111-1111-111111111111

   - job_name: linode-instances
     linode_sd_configs:
       - authorization:
           credentials: abcdef

View file

@@ -2,8 +2,7 @@ scrape_configs:
-
   - job_name: prometheus
     ec2_sd_configs:
-      - region: 'us-east-1'
+      - region: "us-east-1"
         filters:
-          - name: 'tag:environment'
+          - name: "tag:environment"
            values:

View file

@@ -1,4 +1,4 @@
 scrape_configs:
-- job_name: "test"
-  metric_relabel_configs:
-  -
+  - job_name: "test"
+    metric_relabel_configs:
+      -

View file

@@ -1,2 +1,2 @@
 remote_read:
--
+  -

View file

@@ -1,2 +1,2 @@
 remote_write:
--
+  -

View file

@@ -1,2 +1,2 @@
 scrape_configs:
--
+  -

View file

@@ -1,4 +1,4 @@
 scrape_configs:
-- job_name: "test"
-  static_configs:
-  -
+  - job_name: "test"
+    static_configs:
+      -

View file

@@ -1,4 +1,4 @@
 scrape_configs:
-- job_name: "test"
-  relabel_configs:
-  -
+  - job_name: "test"
+    relabel_configs:
+      -

View file

@@ -1,5 +1,4 @@
 scrape_configs:
-
-- job_name: eureka
-  eureka_sd_configs:
-  - server: eureka.com
+  - job_name: eureka
+    eureka_sd_configs:
+      - server: eureka.com

View file

@@ -1,5 +1,4 @@
 scrape_configs:
-
-- job_name: eureka
-  eureka_sd_configs:
-  - server:
+  - job_name: eureka
+    eureka_sd_configs:
+      - server:

View file

@@ -1,4 +1,3 @@
 scrape_configs:
-- hetzner_sd_configs:
-  - role: invalid
-
+  - hetzner_sd_configs:
+      - role: invalid

View file

@@ -1,3 +1,3 @@
 scrape_configs:
-- http_sd_configs:
-  - url: ftp://example.com
+  - http_sd_configs:
+      - url: ftp://example.com

View file

@@ -1,3 +1,3 @@
 scrape_configs:
-- http_sd_configs:
-  - url: http://
+  - http_sd_configs:
+      - url: http://

View file

@@ -1,3 +1,3 @@
 scrape_configs:
-- http_sd_configs:
-  - url: invalid
+  - http_sd_configs:
+      - url: invalid

View file

@@ -3,11 +3,10 @@ scrape_configs:
     kubernetes_sd_configs:
       - role: pod
-        api_server: 'https://localhost:1234'
+        api_server: "https://localhost:1234"
         authorization:
           credentials: 1234
         basic_auth:
           username: user
           password: password
-

View file

@@ -3,8 +3,7 @@ scrape_configs:
     kubernetes_sd_configs:
       - role: node
-        api_server: 'https://localhost:1234'
+        api_server: "https://localhost:1234"
         bearer_token: 1234
         bearer_token_file: somefile
-

View file

@@ -3,10 +3,9 @@ scrape_configs:
     kubernetes_sd_configs:
       - role: pod
-        api_server: 'https://localhost:1234'
+        api_server: "https://localhost:1234"
         bearer_token: 1234
         basic_auth:
           username: user
           password: password
-

View file

@@ -1,4 +1,4 @@
 scrape_configs:
-- job_name: prometheus
-  kubernetes_sd_configs:
-  - role: endpoints
+  - job_name: prometheus
+    kubernetes_sd_configs:
+      - role: endpoints

View file

@@ -1,5 +1,5 @@
 scrape_configs:
-- kubernetes_sd_configs:
-  - api_server: kubernetes:443
-    role: endpoints
-    namespaces:
+  - kubernetes_sd_configs:
+      - api_server: kubernetes:443
+        role: endpoints
+        namespaces:

View file

@@ -1,5 +1,4 @@
 scrape_configs:
-
-- kubernetes_sd_configs:
-  - api_server: kubernetes:443
-    role: vacation
+  - kubernetes_sd_configs:
+      - api_server: kubernetes:443
+        role: vacation

View file

@@ -1,5 +1,5 @@
 scrape_configs:
-- job_name: prometheus
-  kubernetes_sd_configs:
-  - role: endpoints
-    selectors:
+  - job_name: prometheus
+    kubernetes_sd_configs:
+      - role: endpoints
+        selectors:

View file

@@ -1,5 +1,5 @@
 scrape_configs:
-- job_name: prometheus
-  kubernetes_sd_configs:
-  - role: endpoints
-    selectors:
+  - job_name: prometheus
+    kubernetes_sd_configs:
+      - role: endpoints
+        selectors:

View file

@@ -1,5 +1,5 @@
 scrape_configs:
   - job_name: prometheus
     relabel_configs:
-      - separator: ','
+      - separator: ","
         action: labeldrop

View file

@@ -1,5 +1,5 @@
 scrape_configs:
   - job_name: prometheus
     relabel_configs:
-      - separator: ','
+      - separator: ","
         action: labelkeep

View file

@@ -3,7 +3,7 @@ scrape_configs:
     marathon_sd_configs:
       - servers:
-          - 'https://localhost:1234'
+          - "https://localhost:1234"
         auth_token: 1234
         authorization:

View file

@@ -3,7 +3,7 @@ scrape_configs:
     marathon_sd_configs:
       - servers:
-          - 'https://localhost:1234'
+          - "https://localhost:1234"
         auth_token: 1234
         auth_token_file: somefile

View file

@@ -3,7 +3,7 @@ scrape_configs:
     marathon_sd_configs:
       - servers:
-          - 'https://localhost:1234'
+          - "https://localhost:1234"
         auth_token: 1234
         basic_auth:

View file

@@ -3,7 +3,7 @@ scrape_configs:
     marathon_sd_configs:
       - servers:
-          - 'https://localhost:1234'
+          - "https://localhost:1234"
         auth_token: 1234
         bearer_token: 4567

View file

@@ -4,7 +4,6 @@ global:
   evaluation_interval: 30s

 scrape_configs:
-
-- job_name: service-marathon
-  marathon_sd_configs:
-  - servers:
+  - job_name: service-marathon
+    marathon_sd_configs:
+      - servers:

View file

@@ -1,4 +1,3 @@
 scrape_configs:
-- openstack_sd_configs:
-  - availability: invalid
-
+  - openstack_sd_configs:
+      - availability: invalid

View file

@@ -1,4 +1,3 @@
 scrape_configs:
-- openstack_sd_configs:
-  - role: invalid
-
+  - openstack_sd_configs:
+      - role: invalid

View file

@@ -3,4 +3,3 @@ remote_write:
     name: queue1
   - url: localhost:9091
     name: queue1
-

View file

@@ -17,8 +17,7 @@ alerting:
           - 1.2.3.6:9093

 scrape_configs:
-
   - job_name: foo
     static_configs:
       - targets:
           - localhost:9090
@@ -27,7 +26,7 @@ scrape_configs:
           my: label
           your: label

   - job_name: bar
     azure_sd_configs:
       - environment: AzurePublicCloud

View file

@@ -1,5 +1,5 @@
 scrape_configs:
-- scaleway_sd_configs:
-  - role: instance
-    project_id: 11111111-1111-1111-1111-111111111112
-    access_key: SCWXXXXXXXXXXXXXXXXX
+  - scaleway_sd_configs:
+      - role: instance
+        project_id: 11111111-1111-1111-1111-111111111112
+        access_key: SCWXXXXXXXXXXXXXXXXX

View file

@@ -1,7 +1,6 @@
 scrape_configs:
-- scaleway_sd_configs:
-  - role: invalid
-    project_id: 11111111-1111-1111-1111-111111111112
-    access_key: SCWXXXXXXXXXXXXXXXXX
-    secret_key_file: bar
-
+  - scaleway_sd_configs:
+      - role: invalid
+        project_id: 11111111-1111-1111-1111-111111111112
+        access_key: SCWXXXXXXXXXXXXXXXXX
+        secret_key_file: bar

View file

@@ -1,8 +1,7 @@
 scrape_configs:
-- scaleway_sd_configs:
-  - role: instance
-    project_id: 11111111-1111-1111-1111-111111111112
-    access_key: SCWXXXXXXXXXXXXXXXXX
-    secret_key_file: bar
-    secret_key: 11111111-1111-1111-1111-111111111112
-
+  - scaleway_sd_configs:
+      - role: instance
+        project_id: 11111111-1111-1111-1111-111111111112
+        access_key: SCWXXXXXXXXXXXXXXXXX
+        secret_key_file: bar
+        secret_key: 11111111-1111-1111-1111-111111111112

View file

@@ -1,3 +1,3 @@
 scrape_configs:
-- job_name: prometheus
-  body_size_limit: 100
+  - job_name: prometheus
+    body_size_limit: 100

View file

@@ -1,4 +1,4 @@
 scrape_configs:
-- job_name: prometheus
-  scrape_interval: 5s
-  scrape_timeout: 6s
+  - job_name: prometheus
+    scrape_interval: 5s
+    scrape_timeout: 6s

View file

@@ -3,7 +3,7 @@ global:
   scrape_timeout: 15s

 scrape_configs:
-- job_name: prometheus
-  scrape_interval: 5s
+  - job_name: prometheus
+    scrape_interval: 5s

View file

@@ -17,4 +17,4 @@ scrape_configs:
   - job_name: prometheus
     consult_sd_configs:
-      - server: 'localhost:1234'
+      - server: "localhost:1234"

View file

@@ -1,5 +1,5 @@
 scrape_configs:
-- job_name: prometheus
-  static_configs:
-  - targets:
-    - http://bad
+  - job_name: prometheus
+    static_configs:
+      - targets:
+          - http://bad

View file

@@ -1,7 +1,6 @@
 # the YAML structure is identical to valid.yml but the raw data is different.
-- targets: ['localhost:9090', 'example.org:443']
+- targets: ["localhost:9090", "example.org:443"]
   labels:
     foo: bar
-
-- targets: ['my.domain']
+- targets: ["my.domain"]

View file

@@ -2,14 +2,13 @@
 # DigitalOcean.
 scrape_configs:
   # Make Prometheus scrape itself for metrics.
-  - job_name: 'prometheus'
+  - job_name: "prometheus"
     static_configs:
-      - targets: ['localhost:9090']
+      - targets: ["localhost:9090"]

   # Discover Node Exporter instances to scrape.
-  - job_name: 'node'
-
+  - job_name: "node"
     digitalocean_sd_configs:
       - authorization:
@@ -17,10 +16,10 @@ scrape_configs:
     relabel_configs:
       # Only scrape targets that have a tag 'monitoring'.
       - source_labels: [__meta_digitalocean_tags]
-        regex: '.*,monitoring,.*'
+        regex: ".*,monitoring,.*"
         action: keep
       # Use the public IPv6 address and port 9100 to scrape the target.
       - source_labels: [__meta_digitalocean_public_ipv6]
         target_label: __address__
-        replacement: '[$1]:9100'
+        replacement: "[$1]:9100"

View file

@@ -1,20 +1,19 @@
 # A example scrape configuration for running Prometheus with Docker.
 scrape_configs:
   # Make Prometheus scrape itself for metrics.
-  - job_name: 'prometheus'
+  - job_name: "prometheus"
     static_configs:
-      - targets: ['localhost:9090']
+      - targets: ["localhost:9090"]

   # Create a job for Docker daemon.
   #
   # This example requires Docker daemon to be configured to expose
   # Prometheus metrics, as documented here:
   # https://docs.docker.com/config/daemon/prometheus/
-  - job_name: 'docker'
-
+  - job_name: "docker"
     static_configs:
-      - targets: ['localhost:9323']
+      - targets: ["localhost:9323"]

   # Create a job for Docker Swarm containers.
   #
@@ -26,7 +25,7 @@ scrape_configs:
   #   --mount type=bind,src=/sys,dst=/sys,ro
   #   --mount type=bind,src=/var/lib/docker,dst=/var/lib/docker,ro
   #   google/cadvisor -docker_only
-  - job_name: 'docker-containers'
+  - job_name: "docker-containers"
     docker_sd_configs:
       - host: unix:///var/run/docker.sock # You can also use http/https to connect to the Docker daemon.
     relabel_configs:

View file

@@ -2,18 +2,17 @@
 # Docker Swarm.
 scrape_configs:
   # Make Prometheus scrape itself for metrics.
-  - job_name: 'prometheus'
+  - job_name: "prometheus"
     static_configs:
-      - targets: ['localhost:9090']
+      - targets: ["localhost:9090"]

   # Create a job for Docker daemons.
   #
   # This example requires Docker daemons to be configured to expose
   # Prometheus metrics, as documented here:
   # https://docs.docker.com/config/daemon/prometheus/
-  - job_name: 'docker'
-
+  - job_name: "docker"
     dockerswarm_sd_configs:
       - host: unix:///var/run/docker.sock # You can also use http/https to connect to the Docker daemon.
         role: nodes
@@ -34,7 +33,7 @@ scrape_configs:
   #   --mount type=bind,src=/sys,dst=/sys,ro
   #   --mount type=bind,src=/var/lib/docker,dst=/var/lib/docker,ro
   #   google/cadvisor -docker_only
-  - job_name: 'dockerswarm'
+  - job_name: "dockerswarm"
     dockerswarm_sd_configs:
       - host: unix:///var/run/docker.sock # You can also use http/https to connect to the Docker daemon.
         role: tasks
@@ -51,4 +50,3 @@ scrape_configs:
       - regex: __meta_dockerswarm_service_label_prometheus_(.+)
         action: labelmap
         replacement: $1
-

View file

@@ -2,14 +2,13 @@
 # Hetzner.
 scrape_configs:
   # Make Prometheus scrape itself for metrics.
-  - job_name: 'prometheus'
+  - job_name: "prometheus"
     static_configs:
-      - targets: ['localhost:9090']
+      - targets: ["localhost:9090"]

   # Discover Node Exporter instances to scrape.
-  - job_name: 'node'
-
+  - job_name: "node"
     hetzner_sd_configs:
       - authorization:
@@ -19,10 +18,10 @@ scrape_configs:
       # Use the public IPv4 and port 9100 to scrape the target.
       - source_labels: [__meta_hetzner_public_ipv4]
         target_label: __address__
-        replacement: '$1:9100'
+        replacement: "$1:9100"

   # Discover Node Exporter instances to scrape using a Hetzner Cloud Network called mynet.
-  - job_name: 'node_private'
+  - job_name: "node_private"
     hetzner_sd_configs:
       - authorization:
@@ -32,10 +31,10 @@ scrape_configs:
       # Use the private IPv4 within the Hetzner Cloud Network and port 9100 to scrape the target.
       - source_labels: [__meta_hetzner_hcloud_private_ipv4_mynet]
         target_label: __address__
-        replacement: '$1:9100'
+        replacement: "$1:9100"

   # Discover Node Exporter instances to scrape.
-  - job_name: 'node_robot'
+  - job_name: "node_robot"
     hetzner_sd_configs:
       - basic_auth:
@@ -46,4 +45,4 @@ scrape_configs:
       # Use the public IPv4 and port 9100 to scrape the target.
       - source_labels: [__meta_hetzner_public_ipv4]
         target_label: __address__
-        replacement: '$1:9100'
+        replacement: "$1:9100"

View file

@@ -16,7 +16,7 @@
 # default named port `https`. This works for single API server deployments as
 # well as HA API server deployments.
 scrape_configs:
-  - job_name: 'kubernetes-apiservers'
+  - job_name: "kubernetes-apiservers"

     kubernetes_sd_configs:
      - role: endpoints
@@ -47,17 +47,22 @@ scrape_configs:
     # will add targets for each API server which Kubernetes adds an endpoint to
     # the default/kubernetes service.
     relabel_configs:
-      - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+      - source_labels:
+          [
+            __meta_kubernetes_namespace,
+            __meta_kubernetes_service_name,
+            __meta_kubernetes_endpoint_port_name,
+          ]
         action: keep
         regex: default;kubernetes;https

   # Scrape config for nodes (kubelet).
   #
   # Rather than connecting directly to the node, the scrape is proxied though the
   # Kubernetes apiserver. This means it will work if Prometheus is running out of
   # cluster, or can't connect to nodes for some other reason (e.g. because of
   # firewalling).
-  - job_name: 'kubernetes-nodes'
+  - job_name: "kubernetes-nodes"

     # Default to scraping over https. If required, just disable this or change to
     # `http`.
@@ -88,21 +93,21 @@ scrape_configs:
       - action: labelmap
         regex: __meta_kubernetes_node_label_(.+)

   # Scrape config for Kubelet cAdvisor.
   #
   # This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics
   # (those whose names begin with 'container_') have been removed from the
   # Kubelet metrics endpoint. This job scrapes the cAdvisor endpoint to
   # retrieve those metrics.
   #
   # In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
   # HTTP endpoint; use the "/metrics" endpoint on the 4194 port of nodes. In
   # that case (and ensure cAdvisor's HTTP server hasn't been disabled with the
   # --cadvisor-port=0 Kubelet flag).
   #
   # This job is not necessary and should be removed in Kubernetes 1.6 and
   # earlier versions, or it will cause the metrics to be scraped twice.
-  - job_name: 'kubernetes-cadvisor'
+  - job_name: "kubernetes-cadvisor"

     # Default to scraping over https. If required, just disable this or change to
     # `http`.
@@ -139,11 +144,11 @@ scrape_configs:
       - action: labelmap
         regex: __meta_kubernetes_node_label_(.+)

   # Example scrape config for service endpoints.
   #
   # The relabeling allows the actual service scrape endpoint to be configured
   # for all or only some endpoints.
-  - job_name: 'kubernetes-service-endpoints'
+  - job_name: "kubernetes-service-endpoints"

     kubernetes_sd_configs:
       - role: endpoints
@@ -185,11 +190,11 @@ scrape_configs:
         action: replace
         target_label: kubernetes_name

   # Example scrape config for probing services via the Blackbox Exporter.
   #
   # The relabeling allows the actual service scrape endpoint to be configured
   # for all or only some services.
-  - job_name: 'kubernetes-services'
+  - job_name: "kubernetes-services"

     metrics_path: /probe
     params:
@@ -216,11 +221,11 @@ scrape_configs:
       - source_labels: [__meta_kubernetes_service_name]
         target_label: kubernetes_name

   # Example scrape config for probing ingresses via the Blackbox Exporter.
   #
   # The relabeling allows the actual ingress scrape endpoint to be configured
   # for all or only some services.
-  - job_name: 'kubernetes-ingresses'
+  - job_name: "kubernetes-ingresses"

     metrics_path: /probe
     params:
@@ -234,7 +239,12 @@ scrape_configs:
       # - source_labels: [__meta_kubernetes_ingress_annotation_example_io_should_be_probed]
       #   action: keep
       #   regex: true
-      - source_labels: [__meta_kubernetes_ingress_scheme,__address__,__meta_kubernetes_ingress_path]
+      - source_labels:
+          [
+            __meta_kubernetes_ingress_scheme,
+            __address__,
+            __meta_kubernetes_ingress_path,
+          ]
         regex: (.+);(.+);(.+)
         replacement: ${1}://${2}${3}
         target_label: __param_target
@@ -249,12 +259,12 @@ scrape_configs:
       - source_labels: [__meta_kubernetes_ingress_name]
         target_label: kubernetes_name

   # Example scrape config for pods
   #
   # The relabeling allows the actual pod scrape to be configured
   # for all the declared ports (or port-free target if none is declared)
   # or only some ports.
-  - job_name: 'kubernetes-pods'
+  - job_name: "kubernetes-pods"

     kubernetes_sd_configs:
       - role: pod

View file

@@ -3,22 +3,22 @@
 scrape_configs:
   # Make Prometheus scrape itself for metrics.
-  - job_name: 'prometheus'
+  - job_name: "prometheus"
     static_configs:
-      - targets: ['localhost:9090']
+      - targets: ["localhost:9090"]

   # Discover Node Exporter instances to scrape.
-  - job_name: 'node'
+  - job_name: "node"
     linode_sd_configs:
       - authorization:
           credentials: "<replace with a Personal Access Token with linodes:read_only + ips:read_only access>"
     relabel_configs:
       # Only scrape targets that have a tag 'monitoring'.
       - source_labels: [__meta_linode_tags]
-        regex: '.*,monitoring,.*'
+        regex: ".*,monitoring,.*"
         action: keep
       # Use the public IPv6 address and port 9100 to scrape the target.
       - source_labels: [__meta_linode_public_ipv6]
         target_label: __address__
-        replacement: '[$1]:9100'
+        replacement: "[$1]:9100"

View file

@@ -2,14 +2,13 @@
 # (or DC/OS) cluster.
 scrape_configs:
   # Make Prometheus scrape itself for metrics.
-  - job_name: 'prometheus'
+  - job_name: "prometheus"
     static_configs:
-      - targets: ['localhost:9090']
+      - targets: ["localhost:9090"]

   # Discover Marathon services to scrape.
-  - job_name: 'marathon'
-
+  - job_name: "marathon"
     # Scrape Marathon itself to discover new services every minute.
     marathon_sd_configs:
@@ -18,7 +17,6 @@ scrape_configs:
       refresh_interval: 60s

     relabel_configs:
-
       # Only scrape targets that have a port label called 'metrics' specified on a port
       # in their app definitions. Example using a port mapping (container or bridge networking):
       #
@@ -45,7 +43,11 @@ scrape_configs:
       #   ]

       # Match a slash-prefixed string either in a portMapping or a portDefinition label.
-      - source_labels: [__meta_marathon_port_mapping_label_metrics,__meta_marathon_port_definition_label_metrics]
+      - source_labels:
+          [
+            __meta_marathon_port_mapping_label_metrics,
+            __meta_marathon_port_definition_label_metrics,
+          ]
         regex: (\/.+;|;\/.+)
         action: keep

View file

@@ -20,10 +20,10 @@ rule_files:
 # Here it's Prometheus itself.
 scrape_configs:
   # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
-  - job_name: 'prometheus'
+  - job_name: "prometheus"

     # metrics_path defaults to '/metrics'
     # scheme defaults to 'http'.

     static_configs:
-      - targets: ['localhost:9090']
+      - targets: ["localhost:9090"]

View file

@@ -10,7 +10,7 @@ kind: ClusterRole
 metadata:
   name: prometheus
 rules:
-- apiGroups: [""]
-  resources:
-  - nodes
-  - nodes/metrics
+  - apiGroups: [""]
+    resources:
+      - nodes
+      - nodes/metrics
@@ -18,13 +18,13 @@ rules:
-  - endpoints
-  - pods
-  verbs: ["get", "list", "watch"]
-- apiGroups:
-  - extensions
-  - networking.k8s.io
-  resources:
-  - ingresses
-  verbs: ["get", "list", "watch"]
-- nonResourceURLs: ["/metrics", "/metrics/cadvisor"]
-  verbs: ["get"]
+      - endpoints
+      - pods
+    verbs: ["get", "list", "watch"]
+  - apiGroups:
+      - extensions
+      - networking.k8s.io
+    resources:
+      - ingresses
+    verbs: ["get", "list", "watch"]
+  - nonResourceURLs: ["/metrics", "/metrics/cadvisor"]
+    verbs: ["get"]
 ---
 apiVersion: v1
@@ -42,6 +42,6 @@ roleRef:
   kind: ClusterRole
   name: prometheus
 subjects:
-- kind: ServiceAccount
-  name: prometheus
-  namespace: default
+  - kind: ServiceAccount
+    name: prometheus
+    namespace: default
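The RBAC manifest above is applied like any other Kubernetes object; a usage sketch, assuming the manifest is saved as rbac-setup.yml (name hypothetical here):

    kubectl apply -f rbac-setup.yml

kubectl parses the YAML on the client before talking to the API server, so indentation mistakes of the kind yamllint flags would also surface as client-side errors.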

View file

@@ -1,5 +1,5 @@
 groups:
-- name: yolo
-  rules:
-  - record: yolo
-    expr: rate(hi)
+  - name: yolo
+    rules:
+      - record: yolo
+        expr: rate(hi)

View file

@@ -7,4 +7,3 @@ groups:
         instance: localhost
       annotation:
         summary: annonations is written without s above
-

View file

@@ -1,3 +1,3 @@
 groups:
-- name: yolo
-- name: yolo
+  - name: yolo
+  - name: yolo

View file

@@ -1,5 +1,5 @@
 groups:
-- name: yolo
-  rules:
-  - record: hola
-    expr: 1
+  - name: yolo
+    rules:
+      - record: hola
+        expr: 1

View file

@@ -1,5 +1,5 @@
 groups:
-- name: yolo
-  rules:
-  - record: Hi
-    alert: Hello
+  - name: yolo
+    rules:
+      - record: Hi
+        alert: Hello

View file

@@ -1,5 +1,5 @@
 groups:
   - name: my-group-name
-    interval: 30s # defaults to global interval
+    interval: 30s  # defaults to global interval
     rules:
       - alert: HighErrors
@@ -34,7 +34,7 @@ groups:
       annotations:
         description: "stuff's happening with {{ $.labels.service }}"

   - name: my-another-name
-    interval: 30s # defaults to global interval
+    interval: 30s  # defaults to global interval
     rules:
       - alert: HighErrors