Merge pull request #8802 from mwasilew2/yaml-linting

Adds yamllinting to Makefile.common
Ben Kochie 2021-06-24 15:59:35 +02:00 committed by GitHub
commit 7cb55d5732
105 changed files with 1386 additions and 1365 deletions
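
In short: Makefile.common gains a common-yamllint target and common-all now runs it, a .yamllint config is checked in at the repository root, and the CircleCI jobs install the linter with "sudo apt-get install -y yamllint" before running make. Aside from a golangci-lint timeout bump in the Makefile, the remaining files are mechanical YAML cleanups (normalized quoting, bracket spacing, and consistent sequence indentation) so that "yamllint ." passes with that config. A condensed before/after sketch of the typical churn, drawn from the workflow hunks below:

    # before
    branches: [ main, release-* ]
    language: [ 'go', 'javascript' ]
    # after
    branches: [main, release-*]
    language: ["go", "javascript"]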

@@ -11,155 +11,158 @@ executors:
# should also be updated.
golang:
docker:
- image: circleci/golang:1.16-node
- image: circleci/golang:1.16-node
golang_115:
docker:
- image: circleci/golang:1.15-node
- image: circleci/golang:1.15-node
jobs:
test:
executor: golang
steps:
- prometheus/setup_environment
- go/load-cache:
key: v1
- restore_cache:
keys:
- v3-npm-deps-{{ checksum "web/ui/react-app/yarn.lock" }}
- v3-npm-deps-
- run:
command: make
environment:
# Run garbage collection more aggressively to avoid getting OOMed during the lint phase.
GOGC: "20"
# By default Go uses GOMAXPROCS but a Circle CI executor has many
# cores (> 30) while the CPU and RAM resources are throttled. If we
# don't limit this to the number of allocated cores, the job is
# likely to get OOMed and killed.
GOOPTS: "-p 2"
GOMAXPROCS: "2"
GO111MODULE: "on"
- prometheus/check_proto:
version: "3.15.8"
- prometheus/store_artifact:
file: prometheus
- prometheus/store_artifact:
file: promtool
- go/save-cache:
key: v1
- save_cache:
key: v3-npm-deps-{{ checksum "web/ui/react-app/yarn.lock" }}
paths:
- /home/circleci/.cache/yarn
- store_test_results:
path: test-results
- prometheus/setup_environment
- go/load-cache:
key: v1
- restore_cache:
keys:
- v3-npm-deps-{{ checksum "web/ui/react-app/yarn.lock" }}
- v3-npm-deps-
- run:
command: sudo apt-get install -y yamllint
- run:
command: make
environment:
# Run garbage collection more aggressively to avoid getting OOMed during the lint phase.
GOGC: "20"
# By default Go uses GOMAXPROCS but a Circle CI executor has many
# cores (> 30) while the CPU and RAM resources are throttled. If we
# don't limit this to the number of allocated cores, the job is
# likely to get OOMed and killed.
GOOPTS: "-p 2"
GOMAXPROCS: "2"
GO111MODULE: "on"
- prometheus/check_proto:
version: "3.15.8"
- prometheus/store_artifact:
file: prometheus
- prometheus/store_artifact:
file: promtool
- go/save-cache:
key: v1
- save_cache:
key: v3-npm-deps-{{ checksum "web/ui/react-app/yarn.lock" }}
paths:
- /home/circleci/.cache/yarn
- store_test_results:
path: test-results
test_windows:
executor:
executor:
name: win/default
shell: powershell
working_directory: /go/src/github.com/prometheus/prometheus
steps:
- checkout
- run:
# Temporary workaround until circleci updates go.
command: |
choco upgrade -y golang
- run:
command:
refreshenv
- run:
command: |
$env:GOARCH=""; $env:GOOS=""; cd web/ui; go generate
cd ../..
$TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/discovery.*|github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"}
go test $TestTargets -vet=off -v
environment:
GOGC: "20"
GOOPTS: "-p 2"
- checkout
- run:
# Temporary workaround until circleci updates go.
command: |
choco upgrade -y golang
- run:
command: refreshenv
- run:
command: |
$env:GOARCH=""; $env:GOOS=""; cd web/ui; go generate
cd ../..
$TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/discovery.*|github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"}
go test $TestTargets -vet=off -v
environment:
GOGC: "20"
GOOPTS: "-p 2"
test_tsdb_go115:
executor: golang_115
steps:
- checkout
- run: go test ./tsdb/...
- checkout
- run: go test ./tsdb/...
test_mixins:
executor: golang
steps:
- checkout
- run: go install ./cmd/promtool/.
- run:
command: go install -mod=readonly github.com/google/go-jsonnet/cmd/jsonnet github.com/google/go-jsonnet/cmd/jsonnetfmt github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb
working_directory: ~/project/documentation/prometheus-mixin
- run:
command: make clean
working_directory: ~/project/documentation/prometheus-mixin
- run:
command: jb install
working_directory: ~/project/documentation/prometheus-mixin
- run:
command: make
working_directory: ~/project/documentation/prometheus-mixin
- run:
command: git diff --exit-code
working_directory: ~/project/documentation/prometheus-mixin
- checkout
- run: go install ./cmd/promtool/.
- run:
command: go install -mod=readonly github.com/google/go-jsonnet/cmd/jsonnet github.com/google/go-jsonnet/cmd/jsonnetfmt github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb
working_directory: ~/project/documentation/prometheus-mixin
- run:
command: make clean
working_directory: ~/project/documentation/prometheus-mixin
- run:
command: jb install
working_directory: ~/project/documentation/prometheus-mixin
- run:
command: sudo apt-get install -y yamllint
- run:
command: make
working_directory: ~/project/documentation/prometheus-mixin
- run:
command: git diff --exit-code
working_directory: ~/project/documentation/prometheus-mixin
repo_sync:
executor: golang
steps:
- checkout
- run: mkdir -v -p "${PATH%%:*}" && curl -sL --fail https://github.com/mikefarah/yq/releases/download/v4.6.3/yq_linux_amd64 -o "${PATH%%:*}/yq" && chmod -v +x "${PATH%%:*}/yq"
- run: sha256sum -c <(echo "c4343783c3361495c0d6d1eb742bba7432aa65e13e9fb8d7e201d544bcf14246 ${PATH%%:*}/yq")
- run: ./scripts/sync_repo_files.sh
- checkout
- run: mkdir -v -p "${PATH%%:*}" && curl -sL --fail https://github.com/mikefarah/yq/releases/download/v4.6.3/yq_linux_amd64 -o "${PATH%%:*}/yq" && chmod -v +x "${PATH%%:*}/yq"
- run: sha256sum -c <(echo "c4343783c3361495c0d6d1eb742bba7432aa65e13e9fb8d7e201d544bcf14246 ${PATH%%:*}/yq")
- run: ./scripts/sync_repo_files.sh
workflows:
version: 2
prometheus:
jobs:
- test:
filters:
tags:
only: /.*/
- test_tsdb_go115:
filters:
tags:
only: /.*/
- test_mixins:
filters:
tags:
only: /.*/
- test_windows:
filters:
tags:
only: /.*/
- prometheus/build:
name: build
parallelism: 12
filters:
tags:
only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
- prometheus/publish_main:
context: org-context
requires:
- test
- build
filters:
branches:
only: main
image: circleci/golang:1-node
- prometheus/publish_release:
context: org-context
requires:
- test
- build
filters:
tags:
only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
branches:
ignore: /.*/
image: circleci/golang:1-node
- test:
filters:
tags:
only: /.*/
- test_tsdb_go115:
filters:
tags:
only: /.*/
- test_mixins:
filters:
tags:
only: /.*/
- test_windows:
filters:
tags:
only: /.*/
- prometheus/build:
name: build
parallelism: 12
filters:
tags:
only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
- prometheus/publish_main:
context: org-context
requires:
- test
- build
filters:
branches:
only: main
image: circleci/golang:1-node
- prometheus/publish_release:
context: org-context
requires:
- test
- build
filters:
tags:
only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
branches:
ignore: /.*/
image: circleci/golang:1-node
nightly:
triggers:
- schedule:
@@ -169,5 +172,5 @@ workflows:
only:
- main
jobs:
- repo_sync:
context: org-context
- repo_sync:
context: org-context

@@ -13,12 +13,12 @@ name: "CodeQL"
on:
push:
branches: [ main, release-* ]
branches: [main, release-*]
pull_request:
# The branches below must be a subset of the branches above
branches: [ main ]
branches: [main]
schedule:
- cron: '26 14 * * 1'
- cron: "26 14 * * 1"
jobs:
analyze:
@@ -28,40 +28,40 @@ jobs:
strategy:
fail-fast: false
matrix:
language: [ 'go', 'javascript' ]
language: ["go", "javascript"]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: Checkout repository
uses: actions/checkout@v2
- name: Checkout repository
uses: actions/checkout@v2
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1

@@ -22,37 +22,37 @@ jobs:
PROVIDER: gke
ZONE: europe-west3-a
steps:
- name: Update status to pending
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"pending","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Prepare nodepool
uses: docker://prominfra/funcbench:master
with:
entrypoint: 'docker_entrypoint'
args: make deploy
- name: Delete all resources
if: always()
uses: docker://prominfra/funcbench:master
with:
entrypoint: 'docker_entrypoint'
args: make clean
- name: Update status to failure
if: failure()
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"failure","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Update status to success
if: success()
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"success","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Update status to pending
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"pending","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Prepare nodepool
uses: docker://prominfra/funcbench:master
with:
entrypoint: "docker_entrypoint"
args: make deploy
- name: Delete all resources
if: always()
uses: docker://prominfra/funcbench:master
with:
entrypoint: "docker_entrypoint"
args: make clean
- name: Update status to failure
if: failure()
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"failure","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Update status to success
if: success()
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"success","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"

@@ -2,28 +2,28 @@ name: CIFuzz
on:
pull_request:
paths:
- 'go.sum'
- 'go.mod'
- '**.go'
- "go.sum"
- "go.mod"
- "**.go"
jobs:
Fuzzing:
runs-on: ubuntu-latest
steps:
- name: Build Fuzzers
id: build
uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master
with:
oss-fuzz-project-name: 'prometheus'
dry-run: false
- name: Run Fuzzers
uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master
with:
oss-fuzz-project-name: 'prometheus'
fuzz-seconds: 600
dry-run: false
- name: Upload Crash
uses: actions/upload-artifact@v1
if: failure() && steps.build.outcome == 'success'
with:
name: artifacts
path: ./out/artifacts
Fuzzing:
runs-on: ubuntu-latest
steps:
- name: Build Fuzzers
id: build
uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master
with:
oss-fuzz-project-name: "prometheus"
dry-run: false
- name: Run Fuzzers
uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master
with:
oss-fuzz-project-name: "prometheus"
fuzz-seconds: 600
dry-run: false
- name: Upload Crash
uses: actions/upload-artifact@v1
if: failure() && steps.build.outcome == 'success'
with:
name: artifacts
path: ./out/artifacts

@@ -1,6 +1,6 @@
on:
repository_dispatch:
types: [prombench_start,prombench_restart,prombench_stop]
types: [prombench_start, prombench_restart, prombench_stop]
name: Prombench Workflow
env:
AUTH_FILE: ${{ secrets.TEST_INFRA_PROVIDER_AUTH }}
@@ -22,105 +22,105 @@ jobs:
if: github.event.action == 'prombench_start'
runs-on: ubuntu-latest
steps:
- name: Update status to pending
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"pending", "context": "prombench-status-update-start", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Run make deploy to start test
id: make_deploy
uses: docker://prominfra/prombench:master
with:
args: >-
until make all_nodes_deleted; do echo "waiting for nodepools to be deleted"; sleep 10; done;
make deploy;
- name: Update status to failure
if: failure()
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"failure", "context": "prombench-status-update-start", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Update status to success
if: success()
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"success", "context": "prombench-status-update-start", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Update status to pending
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"pending", "context": "prombench-status-update-start", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Run make deploy to start test
id: make_deploy
uses: docker://prominfra/prombench:master
with:
args: >-
until make all_nodes_deleted; do echo "waiting for nodepools to be deleted"; sleep 10; done;
make deploy;
- name: Update status to failure
if: failure()
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"failure", "context": "prombench-status-update-start", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Update status to success
if: success()
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"success", "context": "prombench-status-update-start", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
benchmark_cancel:
name: Benchmark Cancel
if: github.event.action == 'prombench_stop'
runs-on: ubuntu-latest
steps:
- name: Update status to pending
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"pending", "context": "prombench-status-update-cancel", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Run make clean to stop test
id: make_clean
uses: docker://prominfra/prombench:master
with:
args: >-
until make all_nodes_running; do echo "waiting for nodepools to be created"; sleep 10; done;
make clean;
- name: Update status to failure
if: failure()
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"failure", "context": "prombench-status-update-cancel", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Update status to success
if: success()
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"success", "context": "prombench-status-update-cancel", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Update status to pending
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"pending", "context": "prombench-status-update-cancel", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Run make clean to stop test
id: make_clean
uses: docker://prominfra/prombench:master
with:
args: >-
until make all_nodes_running; do echo "waiting for nodepools to be created"; sleep 10; done;
make clean;
- name: Update status to failure
if: failure()
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"failure", "context": "prombench-status-update-cancel", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Update status to success
if: success()
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"success", "context": "prombench-status-update-cancel", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
benchmark_restart:
name: Benchmark Restart
if: github.event.action == 'prombench_restart'
runs-on: ubuntu-latest
steps:
- name: Update status to pending
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"pending", "context": "prombench-status-update-restart", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Run make clean then make deploy to restart test
id: make_restart
uses: docker://prominfra/prombench:master
with:
args: >-
until make all_nodes_running; do echo "waiting for nodepools to be created"; sleep 10; done;
make clean;
until make all_nodes_deleted; do echo "waiting for nodepools to be deleted"; sleep 10; done;
make deploy;
- name: Update status to failure
if: failure()
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"failure", "context": "prombench-status-update-restart", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Update status to success
if: success()
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"success", "context": "prombench-status-update-restart", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Update status to pending
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"pending", "context": "prombench-status-update-restart", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Run make clean then make deploy to restart test
id: make_restart
uses: docker://prominfra/prombench:master
with:
args: >-
until make all_nodes_running; do echo "waiting for nodepools to be created"; sleep 10; done;
make clean;
until make all_nodes_deleted; do echo "waiting for nodepools to be deleted"; sleep 10; done;
make deploy;
- name: Update status to failure
if: failure()
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"failure", "context": "prombench-status-update-restart", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Update status to success
if: success()
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"success", "context": "prombench-status-update-restart", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"

@@ -1,5 +1,6 @@
---
tasks:
- init:
- init:
make build
command: |
gp sync-done build

@@ -3,14 +3,14 @@ run:
linters:
enable:
- depguard
- golint
- depguard
- golint
issues:
exclude-rules:
- path: _test.go
linters:
- errcheck
- path: _test.go
linters:
- errcheck
linters-settings:
depguard:

.yamllint (new file)
@@ -0,0 +1,26 @@
---
extends: default
rules:
braces:
max-spaces-inside: 1
level: error
brackets:
max-spaces-inside: 1
level: error
commas: disable
comments: disable
comments-indentation: disable
document-start: disable
indentation:
spaces: consistent
key-duplicates:
ignore: |
config/testdata/section_key_dup.bad.yml
line-length: disable
truthy:
ignore: |
.github/workflows/codeql-analysis.yml
.github/workflows/funcbench.yml
.github/workflows/fuzzing.yml
.github/workflows/prombench.yml
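
The config extends yamllint's default ruleset and then relaxes it: line length, commas, comments, and the document-start marker are not enforced; indentation only has to be self-consistent; one duplicate-key test fixture is exempted; and the four workflow files are excluded from the truthy rule because GitHub Actions uses the bare trigger key "on:", which that rule would otherwise flag. An illustrative snippet (my example, not from the diff) of what the braces and brackets settings accept:

    tags: [ canary, v1 ]      # passes: at most one space inside (max-spaces-inside: 1)
    node_meta: {rack: "123"}  # passes: braces with no inner padding
    services: [  nginx  ]     # fails at error level: two spaces inside the brackets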

@@ -26,7 +26,7 @@ TSDB_BENCHMARK_NUM_METRICS ?= 1000
TSDB_BENCHMARK_DATASET ?= ./tsdb/testdata/20kseries.json
TSDB_BENCHMARK_OUTPUT_DIR ?= ./benchout
GOLANGCI_LINT_OPTS ?= --timeout 2m
GOLANGCI_LINT_OPTS ?= --timeout 4m
include Makefile.common

@@ -118,7 +118,7 @@ endif
%: common-% ;
.PHONY: common-all
common-all: precheck style check_license lint unused build test
common-all: precheck style check_license lint yamllint unused build test
.PHONY: common-style
common-style:
@@ -198,6 +198,11 @@ else
endif
endif
.PHONY: common-yamllint
common-yamllint:
@echo ">> running yamllint on all YAML files in the repository"
yamllint .
# For backward-compatibility.
.PHONY: common-staticcheck
common-staticcheck: lint
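
The new target is deliberately thin: it assumes yamllint is already on the PATH (the CircleCI hunks above install it from apt) and lints the entire tree against the root .yamllint config. As a hypothetical illustration, not part of this diff, the same target could be run from a GitHub Actions workflow by installing the linter from PyPI first:

    # hypothetical workflow step (illustrative only)
    - name: Lint YAML
      run: |
        pip install yamllint
        make common-yamllint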

@@ -8,5 +8,5 @@ tests:
values: 3
promql_expr_test:
# This PromQL generates an error.
- expr: "join_1 + on(a) join_2"
# This PromQL generates an error.
- expr: "join_1 + on(a) join_2"

@@ -8,13 +8,13 @@ tests:
- interval: 1m
input_series:
- series: test_full
values: '0 0'
values: "0 0"
- series: test_stale
values: '0 stale'
values: "0 stale"
- series: test_missing
values: '0 _ _ _ _ _ _ 0'
values: "0 _ _ _ _ _ _ 0"
promql_expr_test:
# Ensure the sample is evaluated at the time we expect it to be.
@@ -36,7 +36,7 @@ tests:
eval_time: 59s
exp_samples:
- value: 0
labels: 'test_stale'
labels: "test_stale"
- expr: test_stale
eval_time: 1m
exp_samples: []
@@ -83,10 +83,10 @@ tests:
- expr: count(ALERTS) by (alertname, alertstate)
eval_time: 4m
exp_samples:
- labels: '{alertname="AlwaysFiring",alertstate="firing"}'
value: 1
- labels: '{alertname="InstanceDown",alertstate="pending"}'
value: 1
- labels: '{alertname="AlwaysFiring",alertstate="firing"}'
value: 1
- labels: '{alertname="InstanceDown",alertstate="pending"}'
value: 1
alert_rule_test:
- eval_time: 1d
@@ -120,7 +120,7 @@ tests:
- series: 'test{job="test", instance="x:0"}'
# 2 minutes + 1 second of input data, recording rules should only run
# once a minute.
values: '0+1x120'
values: "0+1x120"
promql_expr_test:
- expr: job:test:count_over_time1m

@@ -1,4 +1,4 @@
scrape_configs:
- azure_sd_configs:
- authentication_method: invalid
subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11
- azure_sd_configs:
- authentication_method: invalid
subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11

@@ -4,4 +4,4 @@ scrape_configs:
- subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11
tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2
client_id:
client_secret: mysecret
client_secret: mysecret

@@ -4,4 +4,4 @@ scrape_configs:
- subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11
tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2
client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C
client_secret:
client_secret:

@@ -4,4 +4,4 @@ scrape_configs:
- subscription_id:
tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2
client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C
client_secret: mysecret
client_secret: mysecret

@@ -4,4 +4,4 @@ scrape_configs:
- subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11
tenant_id:
client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C
client_secret: mysecret
client_secret: mysecret

@@ -3,4 +3,3 @@ scrape_configs:
bearer_token: 1234
bearer_token_file: somefile

@@ -5,4 +5,3 @@ scrape_configs:
basic_auth:
username: user
password: password

@@ -1,24 +1,24 @@
# my global config
global:
scrape_interval: 15s
scrape_interval: 15s
evaluation_interval: 30s
# scrape_timeout is set to the global default (10s).
external_labels:
monitor: codelab
foo: bar
foo: bar
rule_files:
- "first.rules"
- "my/*.rules"
- "first.rules"
- "my/*.rules"
remote_write:
- url: http://remote1/push
name: drop_expensive
write_relabel_configs:
- source_labels: [__name__]
regex: expensive.*
action: drop
- source_labels: [__name__]
regex: expensive.*
action: drop
oauth2:
client_id: "123"
client_secret: "456"
@@ -46,300 +46,298 @@ remote_read:
key_file: valid_key_file
scrape_configs:
- job_name: prometheus
- job_name: prometheus
honor_labels: true
# scrape_interval is defined by the configured global (15s).
# scrape_timeout is defined by the global default (10s).
honor_labels: true
# scrape_interval is defined by the configured global (15s).
# scrape_timeout is defined by the global default (10s).
# metrics_path defaults to '/metrics'
# scheme defaults to 'http'.
# metrics_path defaults to '/metrics'
# scheme defaults to 'http'.
file_sd_configs:
- files:
- foo/*.slow.json
- foo/*.slow.yml
- single/file.yml
refresh_interval: 10m
- files:
- bar/*.yaml
file_sd_configs:
- files:
- foo/*.slow.json
- foo/*.slow.yml
- single/file.yml
refresh_interval: 10m
- files:
- bar/*.yaml
static_configs:
- targets: ['localhost:9090', 'localhost:9191']
labels:
my: label
your: label
static_configs:
- targets: ["localhost:9090", "localhost:9191"]
labels:
my: label
your: label
relabel_configs:
- source_labels: [job, __meta_dns_name]
regex: (.*)some-[regex]
target_label: job
replacement: foo-${1}
# action defaults to 'replace'
- source_labels: [abc]
target_label: cde
- replacement: static
target_label: abc
- regex:
replacement: static
target_label: abc
relabel_configs:
- source_labels: [job, __meta_dns_name]
regex: (.*)some-[regex]
target_label: job
replacement: foo-${1}
# action defaults to 'replace'
- source_labels: [abc]
target_label: cde
- replacement: static
target_label: abc
- regex:
replacement: static
target_label: abc
authorization:
credentials_file: valid_token_file
authorization:
credentials_file: valid_token_file
- job_name: service-x
basic_auth:
username: admin_name
password: "multiline\nmysecret\ntest"
scrape_interval: 50s
scrape_timeout: 5s
body_size_limit: 10MB
sample_limit: 1000
metrics_path: /my_path
scheme: https
dns_sd_configs:
- refresh_interval: 15s
names:
- first.dns.address.domain.com
- second.dns.address.domain.com
- names:
- first.dns.address.domain.com
# refresh_interval defaults to 30s.
relabel_configs:
- source_labels: [job]
regex: (.*)some-[regex]
action: drop
- source_labels: [__address__]
modulus: 8
target_label: __tmp_hash
action: hashmod
- source_labels: [__tmp_hash]
regex: 1
action: keep
- action: labelmap
regex: 1
- action: labeldrop
regex: d
- action: labelkeep
regex: k
metric_relabel_configs:
- source_labels: [__name__]
regex: expensive_metric.*
action: drop
- job_name: service-y
consul_sd_configs:
- server: 'localhost:1234'
token: mysecret
services: ['nginx', 'cache', 'mysql']
tags: ["canary", "v1"]
node_meta:
rack: "123"
allow_stale: true
scheme: https
tls_config:
ca_file: valid_ca_file
cert_file: valid_cert_file
key_file: valid_key_file
insecure_skip_verify: false
relabel_configs:
- source_labels: [__meta_sd_consul_tags]
separator: ','
regex: label:([^=]+)=([^,]+)
target_label: ${1}
replacement: ${2}
- job_name: service-z
tls_config:
cert_file: valid_cert_file
key_file: valid_key_file
authorization:
credentials: mysecret
- job_name: service-kubernetes
kubernetes_sd_configs:
- role: endpoints
api_server: 'https://localhost:1234'
tls_config:
cert_file: valid_cert_file
key_file: valid_key_file
- job_name: service-x
basic_auth:
username: 'myusername'
password: 'mysecret'
username: admin_name
password: "multiline\nmysecret\ntest"
- job_name: service-kubernetes-namespaces
scrape_interval: 50s
scrape_timeout: 5s
kubernetes_sd_configs:
- role: endpoints
api_server: 'https://localhost:1234'
namespaces:
names:
- default
body_size_limit: 10MB
sample_limit: 1000
basic_auth:
username: 'myusername'
password_file: valid_password_file
metrics_path: /my_path
scheme: https
dns_sd_configs:
- refresh_interval: 15s
names:
- first.dns.address.domain.com
- second.dns.address.domain.com
- names:
- first.dns.address.domain.com
# refresh_interval defaults to 30s.
- job_name: service-marathon
marathon_sd_configs:
- servers:
- 'https://marathon.example.com:443'
relabel_configs:
- source_labels: [job]
regex: (.*)some-[regex]
action: drop
- source_labels: [__address__]
modulus: 8
target_label: __tmp_hash
action: hashmod
- source_labels: [__tmp_hash]
regex: 1
action: keep
- action: labelmap
regex: 1
- action: labeldrop
regex: d
- action: labelkeep
regex: k
metric_relabel_configs:
- source_labels: [__name__]
regex: expensive_metric.*
action: drop
- job_name: service-y
consul_sd_configs:
- server: "localhost:1234"
token: mysecret
services: ["nginx", "cache", "mysql"]
tags: ["canary", "v1"]
node_meta:
rack: "123"
allow_stale: true
scheme: https
tls_config:
ca_file: valid_ca_file
cert_file: valid_cert_file
key_file: valid_key_file
insecure_skip_verify: false
relabel_configs:
- source_labels: [__meta_sd_consul_tags]
separator: ","
regex: label:([^=]+)=([^,]+)
target_label: ${1}
replacement: ${2}
- job_name: service-z
auth_token: "mysecret"
tls_config:
cert_file: valid_cert_file
key_file: valid_key_file
- job_name: service-ec2
ec2_sd_configs:
- region: us-east-1
access_key: access
secret_key: mysecret
profile: profile
filters:
- name: tag:environment
values:
- prod
authorization:
credentials: mysecret
- name: tag:service
values:
- web
- db
- job_name: service-kubernetes
- job_name: service-lightsail
lightsail_sd_configs:
- region: us-east-1
access_key: access
secret_key: mysecret
profile: profile
kubernetes_sd_configs:
- role: endpoints
api_server: "https://localhost:1234"
tls_config:
cert_file: valid_cert_file
key_file: valid_key_file
- job_name: service-azure
azure_sd_configs:
- environment: AzurePublicCloud
authentication_method: OAuth
subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11
tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2
client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C
client_secret: mysecret
port: 9100
basic_auth:
username: "myusername"
password: "mysecret"
- job_name: service-nerve
nerve_sd_configs:
- servers:
- localhost
paths:
- /monitoring
- job_name: service-kubernetes-namespaces
- job_name: 0123service-xxx
metrics_path: /metrics
static_configs:
- targets:
- localhost:9090
kubernetes_sd_configs:
- role: endpoints
api_server: "https://localhost:1234"
namespaces:
names:
- default
- job_name: badfederation
honor_timestamps: false
metrics_path: /federate
static_configs:
- targets:
- localhost:9090
basic_auth:
username: "myusername"
password_file: valid_password_file
- job_name: 測試
metrics_path: /metrics
static_configs:
- targets:
- localhost:9090
- job_name: service-marathon
marathon_sd_configs:
- servers:
- "https://marathon.example.com:443"
- job_name: httpsd
http_sd_configs:
- url: 'http://example.com/prometheus'
auth_token: "mysecret"
tls_config:
cert_file: valid_cert_file
key_file: valid_key_file
- job_name: service-triton
triton_sd_configs:
- account: 'testAccount'
dns_suffix: 'triton.example.com'
endpoint: 'triton.example.com'
port: 9163
refresh_interval: 1m
version: 1
tls_config:
cert_file: valid_cert_file
key_file: valid_key_file
- job_name: service-ec2
ec2_sd_configs:
- region: us-east-1
access_key: access
secret_key: mysecret
profile: profile
filters:
- name: tag:environment
values:
- prod
- job_name: digitalocean-droplets
digitalocean_sd_configs:
- authorization:
credentials: abcdef
- name: tag:service
values:
- web
- db
- job_name: docker
docker_sd_configs:
- host: unix:///var/run/docker.sock
- job_name: service-lightsail
lightsail_sd_configs:
- region: us-east-1
access_key: access
secret_key: mysecret
profile: profile
- job_name: dockerswarm
dockerswarm_sd_configs:
- host: http://127.0.0.1:2375
role: nodes
- job_name: service-azure
azure_sd_configs:
- environment: AzurePublicCloud
authentication_method: OAuth
subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11
tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2
client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C
client_secret: mysecret
port: 9100
- job_name: service-openstack
openstack_sd_configs:
- role: instance
region: RegionOne
port: 80
refresh_interval: 1m
tls_config:
ca_file: valid_ca_file
cert_file: valid_cert_file
key_file: valid_key_file
- job_name: service-nerve
nerve_sd_configs:
- servers:
- localhost
paths:
- /monitoring
- job_name: hetzner
hetzner_sd_configs:
- role: hcloud
authorization:
credentials: abcdef
- role: robot
basic_auth:
username: abcdef
password: abcdef
- job_name: 0123service-xxx
metrics_path: /metrics
static_configs:
- targets:
- localhost:9090
- job_name: service-eureka
eureka_sd_configs:
- server: 'http://eureka.example.com:8761/eureka'
- job_name: badfederation
honor_timestamps: false
metrics_path: /federate
static_configs:
- targets:
- localhost:9090
- job_name: scaleway
scaleway_sd_configs:
- role: instance
project_id: 11111111-1111-1111-1111-111111111112
access_key: SCWXXXXXXXXXXXXXXXXX
secret_key: 11111111-1111-1111-1111-111111111111
- role: baremetal
project_id: 11111111-1111-1111-1111-111111111112
access_key: SCWXXXXXXXXXXXXXXXXX
secret_key: 11111111-1111-1111-1111-111111111111
- job_name: 測試
metrics_path: /metrics
static_configs:
- targets:
- localhost:9090
- job_name: linode-instances
linode_sd_configs:
- authorization:
credentials: abcdef
- job_name: httpsd
http_sd_configs:
- url: "http://example.com/prometheus"
- job_name: service-triton
triton_sd_configs:
- account: "testAccount"
dns_suffix: "triton.example.com"
endpoint: "triton.example.com"
port: 9163
refresh_interval: 1m
version: 1
tls_config:
cert_file: valid_cert_file
key_file: valid_key_file
- job_name: digitalocean-droplets
digitalocean_sd_configs:
- authorization:
credentials: abcdef
- job_name: docker
docker_sd_configs:
- host: unix:///var/run/docker.sock
- job_name: dockerswarm
dockerswarm_sd_configs:
- host: http://127.0.0.1:2375
role: nodes
- job_name: service-openstack
openstack_sd_configs:
- role: instance
region: RegionOne
port: 80
refresh_interval: 1m
tls_config:
ca_file: valid_ca_file
cert_file: valid_cert_file
key_file: valid_key_file
- job_name: hetzner
hetzner_sd_configs:
- role: hcloud
authorization:
credentials: abcdef
- role: robot
basic_auth:
username: abcdef
password: abcdef
- job_name: service-eureka
eureka_sd_configs:
- server: "http://eureka.example.com:8761/eureka"
- job_name: scaleway
scaleway_sd_configs:
- role: instance
project_id: 11111111-1111-1111-1111-111111111112
access_key: SCWXXXXXXXXXXXXXXXXX
secret_key: 11111111-1111-1111-1111-111111111111
- role: baremetal
project_id: 11111111-1111-1111-1111-111111111112
access_key: SCWXXXXXXXXXXXXXXXXX
secret_key: 11111111-1111-1111-1111-111111111111
- job_name: linode-instances
linode_sd_configs:
- authorization:
credentials: abcdef
alerting:
alertmanagers:
- scheme: https
static_configs:
- targets:
- "1.2.3.4:9093"
- "1.2.3.5:9093"
- "1.2.3.6:9093"
- scheme: https
static_configs:
- targets:
- "1.2.3.4:9093"
- "1.2.3.5:9093"
- "1.2.3.6:9093"

@@ -2,8 +2,7 @@ scrape_configs:
- job_name: prometheus
ec2_sd_configs:
- region: 'us-east-1'
filters:
- name: 'tag:environment'
values:
- region: "us-east-1"
filters:
- name: "tag:environment"
values:

@@ -1,3 +1,3 @@
alerting:
alert_relabel_configs:
-
-

@@ -1,4 +1,4 @@
alerting:
alertmanagers:
- relabel_configs:
-
- relabel_configs:
-

@@ -1,4 +1,4 @@
scrape_configs:
- job_name: "test"
metric_relabel_configs:
-
- job_name: "test"
metric_relabel_configs:
-

@@ -1,2 +1,2 @@
remote_read:
-
-

@@ -1,2 +1,2 @@
remote_write:
-
-

@@ -1,4 +1,4 @@
remote_write:
- url: "foo"
write_relabel_configs:
-
-

@@ -1,2 +1,2 @@
scrape_configs:
-
-

@@ -1,4 +1,4 @@
scrape_configs:
- job_name: "test"
static_configs:
-
- job_name: "test"
static_configs:
-

@@ -1,4 +1,4 @@
scrape_configs:
- job_name: "test"
relabel_configs:
-
- job_name: "test"
relabel_configs:
-

@@ -1,5 +1,4 @@
scrape_configs:
- job_name: eureka
eureka_sd_configs:
- server: eureka.com
- job_name: eureka
eureka_sd_configs:
- server: eureka.com

@@ -1,5 +1,4 @@
scrape_configs:
- job_name: eureka
eureka_sd_configs:
- server:
- job_name: eureka
eureka_sd_configs:
- server:

@@ -1,4 +1,3 @@
scrape_configs:
- hetzner_sd_configs:
- role: invalid
- hetzner_sd_configs:
- role: invalid

@@ -1,3 +1,3 @@
scrape_configs:
- http_sd_configs:
- url: ftp://example.com
- http_sd_configs:
- url: ftp://example.com

@@ -1,3 +1,3 @@
scrape_configs:
- http_sd_configs:
- url: http://
- http_sd_configs:
- url: http://

@@ -1,3 +1,3 @@
scrape_configs:
- http_sd_configs:
- url: invalid
- http_sd_configs:
- url: invalid

@@ -2,12 +2,11 @@ scrape_configs:
- job_name: prometheus
kubernetes_sd_configs:
- role: pod
api_server: 'https://localhost:1234'
- role: pod
api_server: "https://localhost:1234"
authorization:
authorization:
credentials: 1234
basic_auth:
username: user
password: password
basic_auth:
username: user
password: password

@@ -2,9 +2,8 @@ scrape_configs:
- job_name: prometheus
kubernetes_sd_configs:
- role: node
api_server: 'https://localhost:1234'
bearer_token: 1234
bearer_token_file: somefile
- role: node
api_server: "https://localhost:1234"
bearer_token: 1234
bearer_token_file: somefile

@@ -2,11 +2,10 @@ scrape_configs:
- job_name: prometheus
kubernetes_sd_configs:
- role: pod
api_server: 'https://localhost:1234'
bearer_token: 1234
basic_auth:
username: user
password: password
- role: pod
api_server: "https://localhost:1234"
bearer_token: 1234
basic_auth:
username: user
password: password

@@ -1,4 +1,4 @@
scrape_configs:
- job_name: prometheus
kubernetes_sd_configs:
- role: endpoints
- job_name: prometheus
kubernetes_sd_configs:
- role: endpoints

@@ -1,6 +1,6 @@
scrape_configs:
- job_name: prometheus
kubernetes_sd_configs:
- role: pod
authorization:
credentials: 1234
- role: pod
authorization:
credentials: 1234

@@ -1,6 +1,6 @@
scrape_configs:
- kubernetes_sd_configs:
- api_server: kubernetes:443
role: endpoints
namespaces:
foo: bar
- kubernetes_sd_configs:
- api_server: kubernetes:443
role: endpoints
namespaces:
foo: bar

@@ -1,5 +1,4 @@
scrape_configs:
- kubernetes_sd_configs:
- api_server: kubernetes:443
role: vacation
- kubernetes_sd_configs:
- api_server: kubernetes:443
role: vacation

@@ -1,11 +1,11 @@
scrape_configs:
- job_name: prometheus
kubernetes_sd_configs:
- role: endpoints
selectors:
- role: "pod"
label: "foo=bar"
field: "metadata.status=Running"
- role: "pod"
label: "foo=bar"
field: "metadata.status=Running"
- job_name: prometheus
kubernetes_sd_configs:
- role: endpoints
selectors:
- role: "pod"
label: "foo=bar"
field: "metadata.status=Running"
- role: "pod"
label: "foo=bar"
field: "metadata.status=Running"

@@ -3,12 +3,12 @@ scrape_configs:
kubernetes_sd_configs:
- role: endpoints
selectors:
- role: "node"
label: "foo=bar"
field: "metadata.status=Running"
- role: "service"
label: "foo=bar"
field: "metadata.status=Running"
- role: "endpoints"
label: "foo=bar"
field: "metadata.status=Running"
- role: "node"
label: "foo=bar"
field: "metadata.status=Running"
- role: "service"
label: "foo=bar"
field: "metadata.status=Running"
- role: "endpoints"
label: "foo=bar"
field: "metadata.status=Running"

@@ -3,12 +3,12 @@ scrape_configs:
kubernetes_sd_configs:
- role: endpoints
selectors:
- role: "pod"
label: "foo=bar"
field: "metadata.status=Running"
- role: "service"
label: "foo=bar"
field: "metadata.status=Running"
- role: "endpoints"
label: "foo=bar"
field: "metadata.status=Running"
- role: "pod"
label: "foo=bar"
field: "metadata.status=Running"
- role: "service"
label: "foo=bar"
field: "metadata.status=Running"
- role: "endpoints"
label: "foo=bar"
field: "metadata.status=Running"

@@ -1,7 +1,7 @@
scrape_configs:
- job_name: prometheus
kubernetes_sd_configs:
- role: endpoints
selectors:
- role: "pod"
field: "metadata.status-Running"
- job_name: prometheus
kubernetes_sd_configs:
- role: endpoints
selectors:
- role: "pod"
field: "metadata.status-Running"

@@ -3,6 +3,6 @@ scrape_configs:
kubernetes_sd_configs:
- role: ingress
selectors:
- role: "node"
- role: "node"
label: "foo=bar"
field: "metadata.status=Running"

@@ -3,6 +3,6 @@ scrape_configs:
kubernetes_sd_configs:
- role: ingress
selectors:
- role: "ingress"
- role: "ingress"
label: "foo=bar"
field: "metadata.status=Running"

@@ -3,6 +3,6 @@ scrape_configs:
kubernetes_sd_configs:
- role: node
selectors:
- role: "pod"
- role: "pod"
label: "foo=bar"
field: "metadata.status=Running"

@@ -3,6 +3,6 @@ scrape_configs:
kubernetes_sd_configs:
- role: node
selectors:
- role: "node"
- role: "node"
label: "foo=bar"
field: "metadata.status=Running"

@@ -3,6 +3,6 @@ scrape_configs:
kubernetes_sd_configs:
- role: pod
selectors:
- role: "node"
- role: "node"
label: "foo=bar"
field: "metadata.status=Running"

@@ -3,11 +3,11 @@ scrape_configs:
kubernetes_sd_configs:
- role: pod
selectors:
- role: "pod"
- role: "pod"
label: "foo=bar"
field: "metadata.status=Running"
- role: pod
selectors:
- role: "pod"
- role: "pod"
label: "foo in (bar,baz)"
field: "metadata.status=Running"

@@ -3,6 +3,6 @@ scrape_configs:
kubernetes_sd_configs:
- role: service
selectors:
- role: "pod"
- role: "pod"
label: "foo=bar"
field: "metadata.status=Running"

@@ -3,6 +3,6 @@ scrape_configs:
kubernetes_sd_configs:
- role: service
selectors:
- role: "service"
- role: "service"
label: "foo=bar"
field: "metadata.status=Running"

@@ -1,5 +1,5 @@
scrape_configs:
- job_name: prometheus
relabel_configs:
- modulus: 8
- modulus: 8
action: labeldrop

@@ -1,5 +1,5 @@
scrape_configs:
- job_name: prometheus
relabel_configs:
- separator: ','
- separator: ","
action: labeldrop

@@ -1,5 +1,5 @@
scrape_configs:
- job_name: prometheus
relabel_configs:
- replacement: yolo-{1}
- replacement: yolo-{1}
action: labeldrop

@@ -1,5 +1,5 @@
scrape_configs:
- job_name: prometheus
relabel_configs:
- modulus: 8
- modulus: 8
action: labelkeep

@@ -1,5 +1,5 @@
scrape_configs:
- job_name: prometheus
relabel_configs:
- separator: ','
- separator: ","
action: labelkeep

@@ -1,5 +1,5 @@
scrape_configs:
- job_name: prometheus
relabel_configs:
- replacement: yolo-{1}
- replacement: yolo-{1}
action: labelkeep

@@ -1,3 +1,3 @@
global:
external_labels:
name: !!binary "/w=="
name: !!binary "/w=="

@@ -2,9 +2,9 @@ scrape_configs:
- job_name: prometheus
marathon_sd_configs:
- servers:
- 'https://localhost:1234'
- servers:
- "https://localhost:1234"
auth_token: 1234
authorization:
credentials: 4567
auth_token: 1234
authorization:
credentials: 4567

@@ -2,8 +2,8 @@ scrape_configs:
- job_name: prometheus
marathon_sd_configs:
- servers:
- 'https://localhost:1234'
- servers:
- "https://localhost:1234"
auth_token: 1234
auth_token_file: somefile
auth_token: 1234
auth_token_file: somefile

@@ -2,10 +2,10 @@ scrape_configs:
- job_name: prometheus
marathon_sd_configs:
- servers:
- 'https://localhost:1234'
- servers:
- "https://localhost:1234"
auth_token: 1234
basic_auth:
username: user
password: password
auth_token: 1234
basic_auth:
username: user
password: password

@@ -2,8 +2,8 @@ scrape_configs:
- job_name: prometheus
marathon_sd_configs:
- servers:
- 'https://localhost:1234'
- servers:
- "https://localhost:1234"
auth_token: 1234
bearer_token: 4567
auth_token: 1234
bearer_token: 4567

@@ -1,10 +1,9 @@
# my global config
global:
scrape_interval: 15s
scrape_interval: 15s
evaluation_interval: 30s
scrape_configs:
- job_name: service-marathon
marathon_sd_configs:
- servers:
- job_name: service-marathon
marathon_sd_configs:
- servers:

@@ -1,4 +1,3 @@
scrape_configs:
- openstack_sd_configs:
- availability: invalid
- openstack_sd_configs:
- availability: invalid

@@ -1,4 +1,3 @@
scrape_configs:
- openstack_sd_configs:
- role: invalid
- openstack_sd_configs:
- role: invalid

@@ -2,4 +2,4 @@ remote_read:
- url: localhost:9090
name: queue1
headers:
"x-prometheus-remote-write-version": "somehack"
"x-prometheus-remote-write-version": "somehack"

@@ -3,4 +3,3 @@ remote_write:
name: queue1
- url: localhost:9091
name: queue1

@@ -1,147 +1,146 @@
alerting:
alertmanagers:
- scheme: https
- scheme: https
file_sd_configs:
- files:
- foo/*.slow.json
- foo/*.slow.yml
refresh_interval: 10m
- files:
- bar/*.yaml
static_configs:
- targets:
- 1.2.3.4:9093
- 1.2.3.5:9093
- 1.2.3.6:9093
scrape_configs:
- job_name: foo
static_configs:
- targets:
- localhost:9090
- localhost:9191
labels:
my: label
your: label
- job_name: bar
azure_sd_configs:
- environment: AzurePublicCloud
authentication_method: OAuth
subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11
tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2
client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C
client_secret: <secret>
port: 9100
consul_sd_configs:
- server: localhost:1234
token: <secret>
services: [nginx, cache, mysql]
tags: [canary, v1]
node_meta:
rack: "123"
allow_stale: true
scheme: https
tls_config:
ca_file: valid_ca_file
cert_file: valid_cert_file
key_file: valid_key_file
digitalocean_sd_configs:
- authorization:
credentials: <secret>
docker_sd_configs:
- host: unix:///var/run/docker.sock
dockerswarm_sd_configs:
- host: http://127.0.0.1:2375
role: nodes
dns_sd_configs:
- refresh_interval: 15s
names:
- first.dns.address.domain.com
- second.dns.address.domain.com
- names:
- first.dns.address.domain.com
ec2_sd_configs:
- region: us-east-1
access_key: access
secret_key: <secret>
profile: profile
filters:
- name: tag:environment
values:
- prod
- name: tag:service
values:
- web
- db
file_sd_configs:
- files:
- foo/*.slow.json
- foo/*.slow.yml
refresh_interval: 10m
- files:
- bar/*.yaml
- files:
- single/file.yml
kubernetes_sd_configs:
- role: endpoints
api_server: https://localhost:1234
tls_config:
cert_file: valid_cert_file
key_file: valid_key_file
basic_auth:
username: username
password: <secret>
- role: endpoints
api_server: https://localhost:1234
namespaces:
names:
- default
basic_auth:
username: username
password_file: valid_password_file
marathon_sd_configs:
- servers:
- https://marathon.example.com:443
auth_token: <secret>
tls_config:
cert_file: valid_cert_file
key_file: valid_key_file
nerve_sd_configs:
- servers:
- localhost
paths:
- /monitoring
openstack_sd_configs:
- role: instance
region: RegionOne
port: 80
refresh_interval: 1m
tls_config:
ca_file: valid_ca_file
cert_file: valid_cert_file
key_file: valid_key_file
static_configs:
- targets:
- 1.2.3.4:9093
- 1.2.3.5:9093
- 1.2.3.6:9093
- localhost:9093
scrape_configs:
- job_name: foo
static_configs:
- targets:
- localhost:9090
- localhost:9191
labels:
my: label
your: label
- job_name: bar
azure_sd_configs:
- environment: AzurePublicCloud
authentication_method: OAuth
subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11
tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2
client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C
client_secret: <secret>
port: 9100
consul_sd_configs:
- server: localhost:1234
token: <secret>
services: [nginx, cache, mysql]
tags: [canary, v1]
node_meta:
rack: "123"
allow_stale: true
scheme: https
tls_config:
ca_file: valid_ca_file
cert_file: valid_cert_file
key_file: valid_key_file
digitalocean_sd_configs:
- authorization:
credentials: <secret>
docker_sd_configs:
- host: unix:///var/run/docker.sock
dockerswarm_sd_configs:
- host: http://127.0.0.1:2375
role: nodes
dns_sd_configs:
- refresh_interval: 15s
names:
- first.dns.address.domain.com
- second.dns.address.domain.com
- names:
- first.dns.address.domain.com
ec2_sd_configs:
- region: us-east-1
access_key: access
secret_key: <secret>
profile: profile
filters:
- name: tag:environment
values:
- prod
- name: tag:service
values:
- web
- db
file_sd_configs:
- files:
- single/file.yml
kubernetes_sd_configs:
- role: endpoints
api_server: https://localhost:1234
tls_config:
cert_file: valid_cert_file
key_file: valid_key_file
basic_auth:
username: username
password: <secret>
- role: endpoints
api_server: https://localhost:1234
namespaces:
names:
- default
basic_auth:
username: username
password_file: valid_password_file
marathon_sd_configs:
- servers:
- https://marathon.example.com:443
auth_token: <secret>
tls_config:
cert_file: valid_cert_file
key_file: valid_key_file
nerve_sd_configs:
- servers:
- localhost
paths:
- /monitoring
openstack_sd_configs:
- role: instance
region: RegionOne
port: 80
refresh_interval: 1m
tls_config:
ca_file: valid_ca_file
cert_file: valid_cert_file
key_file: valid_key_file
static_configs:
- targets:
- localhost:9093
triton_sd_configs:
- account: testAccount
dns_suffix: triton.example.com
endpoint: triton.example.com
port: 9163
refresh_interval: 1m
version: 1
tls_config:
cert_file: valid_cert_file
key_file: valid_key_file
triton_sd_configs:
- account: testAccount
dns_suffix: triton.example.com
endpoint: triton.example.com
port: 9163
refresh_interval: 1m
version: 1
tls_config:
cert_file: valid_cert_file
key_file: valid_key_file

@@ -1,5 +1,5 @@
scrape_configs:
- scaleway_sd_configs:
- role: instance
project_id: 11111111-1111-1111-1111-111111111112
access_key: SCWXXXXXXXXXXXXXXXXX
- scaleway_sd_configs:
- role: instance
project_id: 11111111-1111-1111-1111-111111111112
access_key: SCWXXXXXXXXXXXXXXXXX

@@ -1,7 +1,6 @@
scrape_configs:
- scaleway_sd_configs:
- role: invalid
project_id: 11111111-1111-1111-1111-111111111112
access_key: SCWXXXXXXXXXXXXXXXXX
secret_key_file: bar
- scaleway_sd_configs:
- role: invalid
project_id: 11111111-1111-1111-1111-111111111112
access_key: SCWXXXXXXXXXXXXXXXXX
secret_key_file: bar

@@ -1,8 +1,7 @@
scrape_configs:
- scaleway_sd_configs:
- role: instance
project_id: 11111111-1111-1111-1111-111111111112
access_key: SCWXXXXXXXXXXXXXXXXX
secret_key_file: bar
secret_key: 11111111-1111-1111-1111-111111111112
- scaleway_sd_configs:
- role: instance
project_id: 11111111-1111-1111-1111-111111111112
access_key: SCWXXXXXXXXXXXXXXXXX
secret_key_file: bar
secret_key: 11111111-1111-1111-1111-111111111112

@@ -1,3 +1,3 @@
scrape_configs:
- job_name: prometheus
body_size_limit: 100
- job_name: prometheus
body_size_limit: 100

@@ -1,4 +1,4 @@
scrape_configs:
- job_name: prometheus
scrape_interval: 5s
scrape_timeout: 6s
- job_name: prometheus
scrape_interval: 5s
scrape_timeout: 6s

@@ -1,16 +1,16 @@
global:
scrape_interval: 15s
scrape_timeout: 15s
scrape_interval: 15s
scrape_timeout: 15s
scrape_configs:
- job_name: prometheus
- job_name: prometheus
scrape_interval: 5s
scrape_interval: 5s
dns_sd_configs:
- refresh_interval: 15s
names:
- first.dns.address.domain.com
- second.dns.address.domain.com
- names:
- first.dns.address.domain.com
dns_sd_configs:
- refresh_interval: 15s
names:
- first.dns.address.domain.com
- second.dns.address.domain.com
- names:
- first.dns.address.domain.com

@@ -2,5 +2,5 @@ scrape_configs:
- job_name: prometheus
relabel_configs:
- source_labels: [__address__]
modulus: 8
action: hashmod
modulus: 8
action: hashmod

@@ -1,12 +1,12 @@
# my global config
global:
scrape_interval: 15s
scrape_interval: 15s
evaluation_interval: 30s
# scrape_timeout is set to the global default (10s).
external_labels:
monitor: codelab
foo: bar
monitor: codelab
foo: bar
rule_files:
- "first.rules"
@@ -17,4 +17,4 @@ scrape_configs:
- job_name: prometheus
consult_sd_configs:
- server: 'localhost:1234'
- server: "localhost:1234"

@@ -1,5 +1,5 @@
scrape_configs:
- job_name: prometheus
static_configs:
- targets:
- http://bad
- job_name: prometheus
static_configs:
- targets:
- http://bad

@@ -1,7 +1,6 @@
# the YAML structure is identical to valid.yml but the raw data is different.
- targets: ['localhost:9090', 'example.org:443']
- targets: ["localhost:9090", "example.org:443"]
labels:
foo: bar
- targets: ['my.domain']
- targets: ["my.domain"]

@@ -13,15 +13,15 @@ spec:
app: rabbitmq
spec:
containers:
- image: rabbitmq:3.5.4-management
name: rabbitmq
ports:
- containerPort: 5672
name: service
- containerPort: 15672
name: management
- image: kbudde/rabbitmq-exporter
name: rabbitmq-exporter
ports:
- containerPort: 9090
name: exporter
- image: rabbitmq:3.5.4-management
name: rabbitmq
ports:
- containerPort: 5672
name: service
- containerPort: 15672
name: management
- image: kbudde/rabbitmq-exporter
name: rabbitmq-exporter
ports:
- containerPort: 9090
name: exporter

@@ -6,9 +6,9 @@ metadata:
name: rabbitmq
spec:
ports:
- port: 9090
name: exporter
targetPort: exporter
protocol: TCP
- port: 9090
name: exporter
targetPort: exporter
protocol: TCP
selector:
app: rabbitmq

@@ -2,14 +2,13 @@
# DigitalOcean.
scrape_configs:
# Make Prometheus scrape itself for metrics.
- job_name: 'prometheus'
- job_name: "prometheus"
static_configs:
- targets: ['localhost:9090']
- targets: ["localhost:9090"]
# Discover Node Exporter instances to scrape.
- job_name: 'node'
- job_name: "node"
digitalocean_sd_configs:
- authorization:
@@ -17,10 +16,10 @@ scrape_configs:
relabel_configs:
# Only scrape targets that have a tag 'monitoring'.
- source_labels: [__meta_digitalocean_tags]
regex: '.*,monitoring,.*'
regex: ".*,monitoring,.*"
action: keep
# Use the public IPv6 address and port 9100 to scrape the target.
- source_labels: [__meta_digitalocean_public_ipv6]
target_label: __address__
replacement: '[$1]:9100'
replacement: "[$1]:9100"

@@ -1,20 +1,19 @@
# A example scrape configuration for running Prometheus with Docker.
scrape_configs:
# Make Prometheus scrape itself for metrics.
- job_name: 'prometheus'
- job_name: "prometheus"
static_configs:
- targets: ['localhost:9090']
- targets: ["localhost:9090"]
# Create a job for Docker daemon.
#
# This example requires Docker daemon to be configured to expose
# Prometheus metrics, as documented here:
# https://docs.docker.com/config/daemon/prometheus/
- job_name: 'docker'
- job_name: "docker"
static_configs:
- targets: ['localhost:9323']
- targets: ["localhost:9323"]
# Create a job for Docker Swarm containers.
#
@@ -26,7 +25,7 @@ scrape_configs:
# --mount type=bind,src=/sys,dst=/sys,ro
# --mount type=bind,src=/var/lib/docker,dst=/var/lib/docker,ro
# google/cadvisor -docker_only
- job_name: 'docker-containers'
- job_name: "docker-containers"
docker_sd_configs:
- host: unix:///var/run/docker.sock # You can also use http/https to connect to the Docker daemon.
relabel_configs:

@@ -2,18 +2,17 @@
# Docker Swarm.
scrape_configs:
# Make Prometheus scrape itself for metrics.
- job_name: 'prometheus'
- job_name: "prometheus"
static_configs:
- targets: ['localhost:9090']
- targets: ["localhost:9090"]
# Create a job for Docker daemons.
#
# This example requires Docker daemons to be configured to expose
# Prometheus metrics, as documented here:
# https://docs.docker.com/config/daemon/prometheus/
- job_name: 'docker'
- job_name: "docker"
dockerswarm_sd_configs:
- host: unix:///var/run/docker.sock # You can also use http/https to connect to the Docker daemon.
role: nodes
@ -34,7 +33,7 @@ scrape_configs:
# --mount type=bind,src=/sys,dst=/sys,ro
# --mount type=bind,src=/var/lib/docker,dst=/var/lib/docker,ro
# google/cadvisor -docker_only
- job_name: 'dockerswarm'
- job_name: "dockerswarm"
dockerswarm_sd_configs:
- host: unix:///var/run/docker.sock # You can also use http/https to connect to the Docker daemon.
role: tasks
@ -51,4 +50,3 @@ scrape_configs:
- regex: __meta_dockerswarm_service_label_prometheus_(.+)
action: labelmap
replacement: $1
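The trailing `labelmap` rule above maps Swarm service labels with a `prometheus_` prefix onto target labels. A hedged compose-file sketch of attaching such a label (service and label names are illustrative; the assumption here is that Prometheus sanitizes the `.` in the label name to `_` before the regex applies):

# docker-compose.yml fragment for a Swarm service (illustrative)
services:
  app:
    image: example/app:latest
    deploy:
      labels:
        prometheus.team: "backend"   # surfaces as team="backend" after labelmap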

View file

@ -2,14 +2,13 @@
# Hetzner.
scrape_configs:
# Make Prometheus scrape itself for metrics.
- job_name: 'prometheus'
- job_name: "prometheus"
static_configs:
- targets: ['localhost:9090']
- targets: ["localhost:9090"]
# Discover Node Exporter instances to scrape.
- job_name: 'node'
- job_name: "node"
hetzner_sd_configs:
- authorization:
@ -19,10 +18,10 @@ scrape_configs:
# Use the public IPv4 and port 9100 to scrape the target.
- source_labels: [__meta_hetzner_public_ipv4]
target_label: __address__
replacement: '$1:9100'
replacement: "$1:9100"
# Discover Node Exporter instances to scrape using a Hetzner Cloud Network called mynet.
- job_name: 'node_private'
- job_name: "node_private"
hetzner_sd_configs:
- authorization:
@ -32,10 +31,10 @@ scrape_configs:
# Use the private IPv4 within the Hetzner Cloud Network and port 9100 to scrape the target.
- source_labels: [__meta_hetzner_hcloud_private_ipv4_mynet]
target_label: __address__
replacement: '$1:9100'
replacement: "$1:9100"
# Discover Node Exporter instances to scrape.
- job_name: 'node_robot'
- job_name: "node_robot"
hetzner_sd_configs:
- basic_auth:
@ -46,4 +45,4 @@ scrape_configs:
# Use the public IPv4 and port 9100 to scrape the target.
- source_labels: [__meta_hetzner_public_ipv4]
target_label: __address__
replacement: '$1:9100'
replacement: "$1:9100"

View file

@ -16,275 +16,285 @@
# default named port `https`. This works for single API server deployments as
# well as HA API server deployments.
scrape_configs:
- job_name: 'kubernetes-apiservers'
- job_name: "kubernetes-apiservers"
kubernetes_sd_configs:
- role: endpoints
kubernetes_sd_configs:
- role: endpoints
# Default to scraping over https. If required, just disable this or change to
# `http`.
scheme: https
# Default to scraping over https. If required, just disable this or change to
# `http`.
scheme: https
# This TLS & authorization config is used to connect to the actual scrape
# endpoints for cluster components. This is separate to discovery auth
# configuration because discovery & scraping are two separate concerns in
# Prometheus. The discovery auth config is automatic if Prometheus runs inside
# the cluster. Otherwise, more config options have to be provided within the
# <kubernetes_sd_config>.
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
# If your node certificates are self-signed or use a different CA to the
# master CA, then disable certificate verification below. Note that
# certificate verification is an integral part of a secure infrastructure
# so this should only be disabled in a controlled environment. You can
# disable certificate verification by uncommenting the line below.
#
# insecure_skip_verify: true
authorization:
credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
# This TLS & authorization config is used to connect to the actual scrape
# endpoints for cluster components. This is separate to discovery auth
# configuration because discovery & scraping are two separate concerns in
# Prometheus. The discovery auth config is automatic if Prometheus runs inside
# the cluster. Otherwise, more config options have to be provided within the
# <kubernetes_sd_config>.
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
# If your node certificates are self-signed or use a different CA to the
# master CA, then disable certificate verification below. Note that
# certificate verification is an integral part of a secure infrastructure
# so this should only be disabled in a controlled environment. You can
# disable certificate verification by uncommenting the line below.
#
# insecure_skip_verify: true
authorization:
credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
# Keep only the default/kubernetes service endpoints for the https port. This
# will add targets for each API server which Kubernetes adds an endpoint to
# the default/kubernetes service.
relabel_configs:
- source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
action: keep
regex: default;kubernetes;https
# Keep only the default/kubernetes service endpoints for the https port. This
# will add targets for each API server which Kubernetes adds an endpoint to
# the default/kubernetes service.
relabel_configs:
- source_labels:
[
__meta_kubernetes_namespace,
__meta_kubernetes_service_name,
__meta_kubernetes_endpoint_port_name,
]
action: keep
regex: default;kubernetes;https
# Scrape config for nodes (kubelet).
#
# Rather than connecting directly to the node, the scrape is proxied through the
# Kubernetes apiserver. This means it will work if Prometheus is running outside
# the cluster, or can't connect to nodes for some other reason (e.g. because of
# firewalling).
- job_name: 'kubernetes-nodes'
# Default to scraping over https. If required, just disable this or change to
# `http`.
scheme: https
# This TLS & authorization config is used to connect to the actual scrape
# endpoints for cluster components. This is separate to discovery auth
# configuration because discovery & scraping are two separate concerns in
# Prometheus. The discovery auth config is automatic if Prometheus runs inside
# the cluster. Otherwise, more config options have to be provided within the
# <kubernetes_sd_config>.
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
# If your node certificates are self-signed or use a different CA to the
# master CA, then disable certificate verification below. Note that
# certificate verification is an integral part of a secure infrastructure
# so this should only be disabled in a controlled environment. You can
# disable certificate verification by uncommenting the line below.
#
# insecure_skip_verify: true
authorization:
credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
# Scrape config for Kubelet cAdvisor.
#
# This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics
# (those whose names begin with 'container_') have been removed from the
# Kubelet metrics endpoint. This job scrapes the cAdvisor endpoint to
# retrieve those metrics.
#
# In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
# HTTP endpoint; in that case, use the "/metrics" endpoint on port 4194 of
# the nodes (and ensure cAdvisor's HTTP server hasn't been disabled with the
# --cadvisor-port=0 Kubelet flag).
#
# This job is not necessary and should be removed in Kubernetes 1.6 and
# earlier versions, or it will cause the metrics to be scraped twice.
- job_name: 'kubernetes-cadvisor'
# Default to scraping over https. If required, just disable this or change to
# `http`.
scheme: https
# Starting with Kubernetes 1.7.3, the cAdvisor metrics are under /metrics/cadvisor.
# Kubernetes CIS Benchmark recommends against enabling the insecure HTTP
# servers of Kubernetes, therefore the cAdvisor metrics on the secure handler
# are used.
metrics_path: /metrics/cadvisor
# This TLS & authorization config is used to connect to the actual scrape
# endpoints for cluster components. This is separate to discovery auth
# configuration because discovery & scraping are two separate concerns in
# Prometheus. The discovery auth config is automatic if Prometheus runs inside
# the cluster. Otherwise, more config options have to be provided within the
# <kubernetes_sd_config>.
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
# If your node certificates are self-signed or use a different CA to the
# master CA, then disable certificate verification below. Note that
# certificate verification is an integral part of a secure infrastructure
# so this should only be disabled in a controlled environment. You can
# disable certificate verification by uncommenting the line below.
#
# insecure_skip_verify: true
authorization:
credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
# Example scrape config for service endpoints.
#
# The relabeling allows the actual service scrape endpoint to be configured
# for all or only some endpoints.
- job_name: 'kubernetes-service-endpoints'
kubernetes_sd_configs:
- role: endpoints
relabel_configs:
# Example relabel to scrape only endpoints that have
# "example.io/should_be_scraped = true" annotation.
# - source_labels: [__meta_kubernetes_service_annotation_example_io_should_be_scraped]
# action: keep
# regex: true
# Scrape config for nodes (kubelet).
#
# Example relabel to customize metric path based on endpoints
# "example.io/metric_path = <metric path>" annotation.
# - source_labels: [__meta_kubernetes_service_annotation_example_io_metric_path]
# action: replace
# target_label: __metrics_path__
# regex: (.+)
# Rather than connecting directly to the node, the scrape is proxied through the
# Kubernetes apiserver. This means it will work if Prometheus is running outside
# the cluster, or can't connect to nodes for some other reason (e.g. because of
# firewalling).
- job_name: "kubernetes-nodes"
# Default to scraping over https. If required, just disable this or change to
# `http`.
scheme: https
# This TLS & authorization config is used to connect to the actual scrape
# endpoints for cluster components. This is separate to discovery auth
# configuration because discovery & scraping are two separate concerns in
# Prometheus. The discovery auth config is automatic if Prometheus runs inside
# the cluster. Otherwise, more config options have to be provided within the
# <kubernetes_sd_config>.
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
# If your node certificates are self-signed or use a different CA to the
# master CA, then disable certificate verification below. Note that
# certificate verification is an integral part of a secure infrastructure
# so this should only be disabled in a controlled environment. You can
# disable certificate verification by uncommenting the line below.
#
# insecure_skip_verify: true
authorization:
credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
# Scrape config for Kubelet cAdvisor.
#
# Example relabel to scrape only single, desired port for the service based
# on endpoints "example.io/scrape_port = <port>" annotation.
# - source_labels: [__address__, __meta_kubernetes_service_annotation_example_io_scrape_port]
# action: replace
# regex: ([^:]+)(?::\d+)?;(\d+)
# replacement: $1:$2
# target_label: __address__
# This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics
# (those whose names begin with 'container_') have been removed from the
# Kubelet metrics endpoint. This job scrapes the cAdvisor endpoint to
# retrieve those metrics.
#
# Example relabel to configure scrape scheme for all service scrape targets
# based on endpoints "example.io/scrape_scheme = <scheme>" annotation.
# - source_labels: [__meta_kubernetes_service_annotation_example_io_scrape_scheme]
# action: replace
# target_label: __scheme__
# regex: (https?)
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_service_name]
action: replace
target_label: kubernetes_name
# Example scrape config for probing services via the Blackbox Exporter.
#
# The relabeling allows the actual service scrape endpoint to be configured
# for all or only some services.
- job_name: 'kubernetes-services'
metrics_path: /probe
params:
module: [http_2xx]
kubernetes_sd_configs:
- role: service
relabel_configs:
# Example relabel to probe only some services that have "example.io/should_be_probed = true" annotation
# - source_labels: [__meta_kubernetes_service_annotation_example_io_should_be_probed]
# action: keep
# regex: true
- source_labels: [__address__]
target_label: __param_target
- target_label: __address__
replacement: blackbox-exporter.example.com:9115
- source_labels: [__param_target]
target_label: instance
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_service_name]
target_label: kubernetes_name
# Example scrape config for probing ingresses via the Blackbox Exporter.
#
# The relabeling allows the actual ingress scrape endpoint to be configured
# for all or only some services.
- job_name: 'kubernetes-ingresses'
metrics_path: /probe
params:
module: [http_2xx]
kubernetes_sd_configs:
- role: ingress
relabel_configs:
# Example relabel to probe only some ingresses that have "example.io/should_be_probed = true" annotation
# - source_labels: [__meta_kubernetes_ingress_annotation_example_io_should_be_probed]
# action: keep
# regex: true
- source_labels: [__meta_kubernetes_ingress_scheme,__address__,__meta_kubernetes_ingress_path]
regex: (.+);(.+);(.+)
replacement: ${1}://${2}${3}
target_label: __param_target
- target_label: __address__
replacement: blackbox-exporter.example.com:9115
- source_labels: [__param_target]
target_label: instance
- action: labelmap
regex: __meta_kubernetes_ingress_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_ingress_name]
target_label: kubernetes_name
# Example scrape config for pods
#
# The relabeling allows the actual pod scrape to be configured
# for all the declared ports (or a port-free target if none is declared)
# or only some ports.
- job_name: 'kubernetes-pods'
kubernetes_sd_configs:
- role: pod
relabel_configs:
# Example relabel to scrape only pods that have
# "example.io/should_be_scraped = true" annotation.
# - source_labels: [__meta_kubernetes_pod_annotation_example_io_should_be_scraped]
# action: keep
# regex: true
# In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
# HTTP endpoint; in that case, use the "/metrics" endpoint on port 4194 of
# the nodes (and ensure cAdvisor's HTTP server hasn't been disabled with the
# --cadvisor-port=0 Kubelet flag).
#
# Example relabel to customize metric path based on pod
# "example.io/metric_path = <metric path>" annotation.
# - source_labels: [__meta_kubernetes_pod_annotation_example_io_metric_path]
# action: replace
# target_label: __metrics_path__
# regex: (.+)
# This job is not necessary and should be removed in Kubernetes 1.6 and
# earlier versions, or it will cause the metrics to be scraped twice.
- job_name: "kubernetes-cadvisor"
# Default to scraping over https. If required, just disable this or change to
# `http`.
scheme: https
# Starting with Kubernetes 1.7.3, the cAdvisor metrics are under /metrics/cadvisor.
# Kubernetes CIS Benchmark recommends against enabling the insecure HTTP
# servers of Kubernetes, therefore the cAdvisor metrics on the secure handler
# are used.
metrics_path: /metrics/cadvisor
# This TLS & authorization config is used to connect to the actual scrape
# endpoints for cluster components. This is separate to discovery auth
# configuration because discovery & scraping are two separate concerns in
# Prometheus. The discovery auth config is automatic if Prometheus runs inside
# the cluster. Otherwise, more config options have to be provided within the
# <kubernetes_sd_config>.
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
# If your node certificates are self-signed or use a different CA to the
# master CA, then disable certificate verification below. Note that
# certificate verification is an integral part of a secure infrastructure
# so this should only be disabled in a controlled environment. You can
# disable certificate verification by uncommenting the line below.
#
# insecure_skip_verify: true
authorization:
credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
# Example scrape config for service endpoints.
#
# Example relabel to scrape only single, desired port for the pod
# based on pod "example.io/scrape_port = <port>" annotation.
# - source_labels: [__address__, __meta_kubernetes_pod_annotation_example_io_scrape_port]
# action: replace
# regex: ([^:]+)(?::\d+)?;(\d+)
# replacement: $1:$2
# target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: kubernetes_pod_name
# The relabeling allows the actual service scrape endpoint to be configured
# for all or only some endpoints.
- job_name: "kubernetes-service-endpoints"
kubernetes_sd_configs:
- role: endpoints
relabel_configs:
# Example relabel to scrape only endpoints that have
# "example.io/should_be_scraped = true" annotation.
# - source_labels: [__meta_kubernetes_service_annotation_example_io_should_be_scraped]
# action: keep
# regex: true
#
# Example relabel to customize metric path based on endpoints
# "example.io/metric_path = <metric path>" annotation.
# - source_labels: [__meta_kubernetes_service_annotation_example_io_metric_path]
# action: replace
# target_label: __metrics_path__
# regex: (.+)
#
# Example relabel to scrape only single, desired port for the service based
# on endpoints "example.io/scrape_port = <port>" annotation.
# - source_labels: [__address__, __meta_kubernetes_service_annotation_example_io_scrape_port]
# action: replace
# regex: ([^:]+)(?::\d+)?;(\d+)
# replacement: $1:$2
# target_label: __address__
#
# Example relabel to configure scrape scheme for all service scrape targets
# based on endpoints "example.io/scrape_scheme = <scheme>" annotation.
# - source_labels: [__meta_kubernetes_service_annotation_example_io_scrape_scheme]
# action: replace
# target_label: __scheme__
# regex: (https?)
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_service_name]
action: replace
target_label: kubernetes_name
# Example scrape config for probing services via the Blackbox Exporter.
#
# The relabeling allows the actual service scrape endpoint to be configured
# for all or only some services.
- job_name: "kubernetes-services"
metrics_path: /probe
params:
module: [http_2xx]
kubernetes_sd_configs:
- role: service
relabel_configs:
# Example relabel to probe only some services that have "example.io/should_be_probed = true" annotation
# - source_labels: [__meta_kubernetes_service_annotation_example_io_should_be_probed]
# action: keep
# regex: true
- source_labels: [__address__]
target_label: __param_target
- target_label: __address__
replacement: blackbox-exporter.example.com:9115
- source_labels: [__param_target]
target_label: instance
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_service_name]
target_label: kubernetes_name
# Example scrape config for probing ingresses via the Blackbox Exporter.
#
# The relabeling allows the actual ingress scrape endpoint to be configured
# for all or only some services.
- job_name: "kubernetes-ingresses"
metrics_path: /probe
params:
module: [http_2xx]
kubernetes_sd_configs:
- role: ingress
relabel_configs:
# Example relabel to probe only some ingresses that have "example.io/should_be_probed = true" annotation
# - source_labels: [__meta_kubernetes_ingress_annotation_example_io_should_be_probed]
# action: keep
# regex: true
- source_labels:
[
__meta_kubernetes_ingress_scheme,
__address__,
__meta_kubernetes_ingress_path,
]
regex: (.+);(.+);(.+)
replacement: ${1}://${2}${3}
target_label: __param_target
- target_label: __address__
replacement: blackbox-exporter.example.com:9115
- source_labels: [__param_target]
target_label: instance
- action: labelmap
regex: __meta_kubernetes_ingress_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_ingress_name]
target_label: kubernetes_name
# Example scrape config for pods
#
# The relabeling allows the actual pod scrape to be configured
# for all the declared ports (or a port-free target if none is declared)
# or only some ports.
- job_name: "kubernetes-pods"
kubernetes_sd_configs:
- role: pod
relabel_configs:
# Example relabel to scrape only pods that have
# "example.io/should_be_scraped = true" annotation.
# - source_labels: [__meta_kubernetes_pod_annotation_example_io_should_be_scraped]
# action: keep
# regex: true
#
# Example relabel to customize metric path based on pod
# "example.io/metric_path = <metric path>" annotation.
# - source_labels: [__meta_kubernetes_pod_annotation_example_io_metric_path]
# action: replace
# target_label: __metrics_path__
# regex: (.+)
#
# Example relabel to scrape only single, desired port for the pod
# based on pod "example.io/scrape_port = <port>" annotation.
# - source_labels: [__address__, __meta_kubernetes_pod_annotation_example_io_scrape_port]
# action: replace
# regex: ([^:]+)(?::\d+)?;(\d+)
# replacement: $1:$2
# target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: kubernetes_pod_name
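The commented-out relabels in the pods job above key off per-pod annotations. A hedged sketch of a pod opting in under that convention (the `example.io/*` names come from the comments; everything else is illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: my-app                                # illustrative
  annotations:
    example.io/should_be_scraped: "true"      # satisfies the "keep" relabel
    example.io/metric_path: "/admin/metrics"  # would rewrite __metrics_path__
    example.io/scrape_port: "8080"            # would rewrite __address__ to <pod IP>:8080
spec:
  containers:
    - name: app
      image: example/app:latest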

View file

@ -3,22 +3,22 @@
scrape_configs:
# Make Prometheus scrape itself for metrics.
- job_name: 'prometheus'
- job_name: "prometheus"
static_configs:
- targets: ['localhost:9090']
- targets: ["localhost:9090"]
# Discover Node Exporter instances to scrape.
- job_name: 'node'
- job_name: "node"
linode_sd_configs:
- authorization:
credentials: "<replace with a Personal Access Token with linodes:read_only + ips:read_only access>"
relabel_configs:
# Only scrape targets that have a tag 'monitoring'.
- source_labels: [__meta_linode_tags]
regex: '.*,monitoring,.*'
regex: ".*,monitoring,.*"
action: keep
# Use the public IPv6 address and port 9100 to scrape the target.
- source_labels: [__meta_linode_public_ipv6]
target_label: __address__
replacement: '[$1]:9100'
replacement: "[$1]:9100"

View file

@ -2,23 +2,21 @@
# (or DC/OS) cluster.
scrape_configs:
# Make Prometheus scrape itself for metrics.
- job_name: 'prometheus'
- job_name: "prometheus"
static_configs:
- targets: ['localhost:9090']
- targets: ["localhost:9090"]
# Discover Marathon services to scrape.
- job_name: 'marathon'
- job_name: "marathon"
# Scrape Marathon itself to discover new services every minute.
marathon_sd_configs:
- servers:
- http://marathon.mesos:8080
- http://marathon.mesos:8080
refresh_interval: 60s
relabel_configs:
# Only scrape targets that have a port label called 'metrics' specified on a port
# in their app definitions. Example using a port mapping (container or bridge networking):
#
@ -45,7 +43,11 @@ scrape_configs:
# ]
# Match a slash-prefixed string either in a portMapping or a portDefinition label.
- source_labels: [__meta_marathon_port_mapping_label_metrics,__meta_marathon_port_definition_label_metrics]
- source_labels:
[
__meta_marathon_port_mapping_label_metrics,
__meta_marathon_port_definition_label_metrics,
]
regex: (\/.+;|;\/.+)
action: keep
@ -53,7 +55,7 @@ scrape_configs:
- source_labels: [__meta_marathon_port_mapping_label_metrics]
regex: (\/.+)
target_label: __metrics_path__
# If a portDefinition 'metrics' label is set, use the label value as the URI to scrape.
- source_labels: [__meta_marathon_port_definition_label_metrics]
regex: (\/.+)
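A hedged sketch of the Marathon app definition side of this contract, assuming only a portDefinition label is set (the app id is illustrative). In that case the two joined source labels above evaluate to `;/metrics`, which the `;\/.+` alternative of the regex matches:

{
  "id": "/my-service",
  "portDefinitions": [
    { "port": 0, "labels": { "metrics": "/metrics" } }
  ]
}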

View file

@ -1,15 +1,15 @@
# my global config
global:
scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
- static_configs:
- targets:
# - alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
@ -20,10 +20,10 @@ rule_files:
# Here it's Prometheus itself.
scrape_configs:
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
- job_name: 'prometheus'
- job_name: "prometheus"
# metrics_path defaults to '/metrics'
# scheme defaults to 'http'.
static_configs:
- targets: ['localhost:9090']
- targets: ["localhost:9090"]

View file

@ -10,22 +10,22 @@ kind: ClusterRole
metadata:
name: prometheus
rules:
- apiGroups: [""]
resources:
- nodes
- nodes/metrics
- services
- endpoints
- pods
verbs: ["get", "list", "watch"]
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics", "/metrics/cadvisor"]
verbs: ["get"]
- apiGroups: [""]
resources:
- nodes
- nodes/metrics
- services
- endpoints
- pods
verbs: ["get", "list", "watch"]
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics", "/metrics/cadvisor"]
verbs: ["get"]
---
apiVersion: v1
kind: ServiceAccount
@ -42,6 +42,6 @@ roleRef:
kind: ClusterRole
name: prometheus
subjects:
- kind: ServiceAccount
name: prometheus
namespace: default
- kind: ServiceAccount
name: prometheus
namespace: default
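The ClusterRoleBinding above only has an effect if the Prometheus pod actually runs as that ServiceAccount; a hedged pod-spec fragment (image tag illustrative):

spec:
  serviceAccountName: prometheus
  containers:
    - name: prometheus
      image: prom/prometheus:v2.27.1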

View file

@ -1,7 +1,7 @@
groups:
- name: yolo
rules:
- alert: hola
expr: 1
annotations:
ins-tance: localhost
- alert: hola
expr: 1
annotations:
ins-tance: localhost

View file

@ -1,5 +1,5 @@
groups:
- name: yolo
rules:
- record: yolo
expr: rate(hi)
- name: yolo
rules:
- record: yolo
expr: rate(hi)

View file

@ -7,4 +7,3 @@ groups:
instance: localhost
annotation:
summary: annonations is written without s above

View file

@ -1,3 +1,3 @@
groups:
- name: yolo
- name: yolo
- name: yolo
- name: yolo

View file

@ -1,7 +1,7 @@
groups:
- name: yolo
rules:
- record: hola
expr: 1
labels:
__name__: anything
- name: yolo
rules:
- record: hola
expr: 1
labels:
__name__: anything

View file

@ -1,5 +1,5 @@
groups:
- name: yolo
rules:
- record: strawberry{flavor="sweet"}
expr: 1
- record: strawberry{flavor="sweet"}
expr: 1

View file

@ -1,4 +1,4 @@
groups:
- name: yolo
rules:
- record: ylo
- record: ylo

Some files were not shown because too many files have changed in this diff.