Commit 47819fef01

Merge branch 'main' of github.com:prometheus/prometheus into document-and-test-relabel-memory-reuse

Signed-off-by: Douglas Camata <159076+douglascamata@users.noreply.github.com>

Changed files:
.github/CODEOWNERS (4 changes)

@@ -2,6 +2,6 @@
 /web/ui/module @juliusv @nexucis
 /storage/remote @csmarchbanks @cstyan @bwplotka @tomwilkie
 /discovery/kubernetes @brancz
-/tsdb @codesome
+/tsdb @jesusvazquez
-/promql @codesome @roidelapluie
+/promql @roidelapluie
 /cmd/promtool @dgl
.github/ISSUE_TEMPLATE/feature_request.md (deleted, 26 lines). Previous content:

---
name: Feature request
about: Suggest an idea for this project.
title: ''
labels: ''
assignees: ''
---

<!--

Please do *NOT* ask support questions in Github issues.

If your issue is not a feature request or bug report use our
community support.

https://prometheus.io/community/

There is also commercial support available.

https://prometheus.io/support-training/

-->
## Proposal
**Use case. Why is this important?**

*“Nice to have” is not a good use case. :)*
.github/ISSUE_TEMPLATE/feature_request.yml (new file, 23 lines):

---
name: Feature request
description: Suggest an idea for this project.
body:
  - type: markdown
    attributes:
      value: >-
        Please do *NOT* ask support questions in Github issues.


        If your issue is not a feature request or bug report use
        our [community support](https://prometheus.io/community/).


        There is also [commercial
        support](https://prometheus.io/support-training/) available.
  - type: textarea
    attributes:
      label: Proposal
      description: Use case. Why is this important?
      placeholder: “Nice to have” is not a good use case. :)
    validations:
      required: true
.github/actions/build/action.yml (deleted, 23 lines). Previous content:

name: Build
inputs:
  thread:
    type: integer
    description: Current thread
    required: true
    default: 3
  parallelism:
    type: integer
    description: Number of builds to do in parallel
    default: 3
  promu_opts:
    type: string
    description: Options to pass to promu
runs:
  using: composite
  steps:
    - uses: ./.github/actions/setup_environment
    - run: ~/go/bin/promu crossbuild -v --parallelism ${{ inputs.parallelism }} --parallelism-thread ${{ inputs.thread }} ${{ inputs.promu_opts }}
      shell: bash
    - uses: ./.github/actions/save_artifacts
      with:
        directory: .build
.github/actions/check_proto/action.yml (deleted, 20 lines). Previous content:

name: Check proto files
inputs:
  version:
    type: string
    description: Protoc version
    default: "3.5.1"
runs:
  using: composite
  steps:
    - run: |
        env
        set -x
        curl -s -L https://github.com/protocolbuffers/protobuf/releases/download/v${{ inputs.version }}/protoc-${{ inputs.version }}-linux-x86_64.zip > /tmp/protoc.zip
        unzip -d /tmp /tmp/protoc.zip
        chmod +x /tmp/bin/protoc
        export PATH=/tmp/bin:$PATH
        make proto
      shell: bash
    - run: git diff --exit-code
      shell: bash
.github/actions/publish_images/action.yml (deleted, 47 lines). Previous content:

name: Publish image
inputs:
  registry:
    type: string
    description: Docker registry
  organization:
    type: string
    description: Organization
  login:
    type: string
    description: Username
  password:
    type: string
    description: Password
  dockerfile_path:
    description: Path to Dockerfile
    type: string
    default: ""
  dockerbuild_context:
    description: Path to Dockerbuild context
    type: string
    default: ""
  container_image_name:
    description: Name of the container image
    type: string
    default: ""
runs:
  using: composite
  steps:
    - if: inputs.dockerfile_path != ''
      run: echo "export DOCKERFILE_PATH=${{ inputs.dockerfile_path }}" >> /tmp/tmp-profile
      shell: bash
    - if: inputs.container_image_name != ''
      run: echo "export DOCKER_IMAGE_NAME=${{ inputs.container_image_name }}" >> /tmp/tmp-profile
      shell: bash
    - if: inputs.dockerbuild_context != ''
      run: echo "export DOCKERBUILD_CONTEXT=${{ inputs.dockerbuild_context }}" >> /tmp/tmp-profile
      shell: bash
    - run: |
        touch /tmp/tmp-profile
        . /tmp/tmp-profile
        make docker DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }}
        docker images
        echo ${{ inputs.password }} | docker login -u ${{ inputs.login }} --password-stdin ${{ inputs.registry }}
        make docker-publish DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }}
        make docker-manifest DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }}
      shell: bash
.github/actions/publish_main/action.yml (deleted, 43 lines). Previous content:

name: Publish image
inputs:
  docker_hub_organization:
    type: string
    description: DockerHub organization
    default: prom
  docker_hub_login:
    type: string
    description: DockerHub username
  docker_hub_password:
    type: string
    description: DockerHub password
  quay_io_organization:
    type: string
    description: Quay.io organization
    default: prometheus
  quay_io_login:
    type: string
    description: Quay.io username
  quay_io_password:
    type: string
    description: Quay.io password
runs:
  using: composite
  steps:
    - uses: ./.github/actions/setup_environment
      with:
        enable_docker_multibuild: true
    - uses: ./.github/actions/restore_artifacts
    - uses: ./.github/actions/publish_images
      if: inputs.docker_hub_organization != '' && inputs.docker_hub_login != ''
      with:
        registry: docker.io
        organization: ${{ inputs.docker_hub_organization }}
        login: ${{ inputs.docker_hub_login }}
        password: ${{ inputs.docker_hub_password }}
    - uses: ./.github/actions/publish_images
      if: inputs.quay_io_organization != '' && inputs.quay_io_login != ''
      with:
        registry: quay.io
        organization: ${{ inputs.quay_io_organization }}
        login: ${{ inputs.quay_io_login }}
        password: ${{ inputs.quay_io_password }}
.github/actions/publish_release/action.yml (deleted, 54 lines). Previous content:

name: Publish image
inputs:
  docker_hub_organization:
    type: string
    description: DockerHub organization
    default: prom
  docker_hub_login:
    type: string
    description: DockerHub username
  docker_hub_password:
    type: string
    description: DockerHub password
  quay_io_organization:
    type: string
    description: Quay.io organization
    default: prometheus
  quay_io_login:
    type: string
    description: Quay.io username
  quay_io_password:
    type: string
    description: Quay.io password
  github_token:
    type: string
    description: Github Token
runs:
  using: composite
  steps:
    - uses: ./.github/actions/setup_environment
      with:
        enable_docker_multibuild: true
    - uses: ./.github/actions/restore_artifacts
    - run: ~/go/bin/promu crossbuild tarballs
      shell: bash
    - run: ~/go/bin/promu checksum .tarballs
      shell: bash
    - run: ~/go/bin/promu release .tarballs
      shell: bash
      env:
        GITHUB_TOKEN: ${{ inputs.github_token }}
    - uses: ./.github/actions/publish_release_images
      if: inputs.docker_hub_organization != '' && inputs.docker_hub_login != ''
      with:
        registry: docker.io
        organization: ${{ inputs.docker_hub_organization }}
        login: ${{ inputs.docker_hub_login }}
        password: ${{ inputs.docker_hub_password }}
    - uses: ./.github/actions/publish_release_images
      if: inputs.quay_io_organization != '' && inputs.quay_io_login != ''
      with:
        registry: quay.io
        organization: ${{ inputs.quay_io_organization }}
        login: ${{ inputs.quay_io_login }}
        password: ${{ inputs.quay_io_password }}
.github/actions/publish_release_images/action.yml (deleted, 53 lines). Previous content:

name: Publish release image
inputs:
  registry:
    type: string
    description: Docker registry
  organization:
    type: string
    description: Organization
  login:
    type: string
    description: Username
  password:
    type: string
    description: Password
  dockerfile_path:
    description: Path to Dockerfile
    type: string
    default: ""
  dockerbuild_context:
    description: Path to Dockerbuild context
    type: string
    default: ""
  container_image_name:
    description: Name of the container image
    type: string
    default: ""
runs:
  using: composite
  steps:
    - if: inputs.dockerfile_path != ''
      run: echo "export DOCKERFILE_PATH=${{ inputs.dockerfile_path }}" >> /tmp/tmp-profile
      shell: bash
    - if: inputs.container_image_name != ''
      run: echo "export DOCKER_IMAGE_NAME=${{ inputs.container_image_name }}" >> /tmp/tmp-profile
      shell: bash
    - if: inputs.dockerbuild_context != ''
      run: echo "export DOCKERBUILD_CONTEXT=${{ inputs.dockerbuild_context }}" >> /tmp/tmp-profile
      shell: bash
    - run: |
        current_tag=${GITHUB_REF#refs/*/}
        touch /tmp/tmp-profile
        . /tmp/tmp-profile
        make docker DOCKER_IMAGE_TAG="$current_tag" DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }}
        docker images
        echo ${{ inputs.password }} | docker login -u ${{ inputs.login }} --password-stdin ${{ inputs.registry }}
        make docker-publish DOCKER_IMAGE_TAG="$current_tag" DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }}
        make docker-manifest DOCKER_IMAGE_TAG="$current_tag" DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }}
        if [[ "$current_tag" =~ ^v[0-9]+(\.[0-9]+){2}$ ]]; then
          make docker-tag-latest DOCKER_IMAGE_TAG="$current_tag" DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }}
          make docker-publish DOCKER_IMAGE_TAG="latest" DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }}
          make docker-manifest DOCKER_IMAGE_TAG="latest" DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }}
        fi
      shell: bash
.github/actions/restore_artifacts/action.yml (deleted, 19 lines). Previous content:

# Restore artifacts created by save_artifacts.
# Tar is used because the default actions do not preserve directory structure
# and file mode.
name: Restore artifacts
runs:
  using: composite
  steps:
    - name: Download all workflow run artifacts
      uses: actions/download-artifact@v3
      with:
        name: artifact
        path: .artifacts
    - run: |
        for tar in .artifacts/*.tar
        do
            tar xvf $tar
        done
        rm -v .artifacts/*.tar
      shell: bash
.github/actions/save_artifacts/action.yml (deleted, 17 lines). Previous content:

# Tar is used because the default actions do not preserve directory structure
# and file mode.
name: Save artifacts
inputs:
  directory:
    type: string
    description: Path of the directory to save
runs:
  using: composite
  steps:
    - run: |
        tar cvf artifact.tar ${{ inputs.directory }}
        mv artifact.tar artifact-$(sha1sum artifact.tar|awk '{ print $1 }').tar
      shell: bash
    - uses: actions/upload-artifact@v3
      with:
        path: artifact-*.tar
.github/actions/setup_environment/action.yml (deleted, 43 lines). Previous content:

name: Setup environment
inputs:
  enable_go:
    type: boolean
    description: Whether to enable go specific features, such as caching.
    default: true
  enable_npm:
    type: boolean
    description: Whether to enable npm specific features, such as caching.
    default: false
  enable_docker_multibuild:
    type: boolean
    description: Whether to enable multibuild docker
    default: false
runs:
  using: composite
  steps:
    - uses: actions/cache@v3
      if: inputs.enable_go == 'true'
      with:
        path: |
          ~/.cache/go-build
          ~/go/pkg/mod
        key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
        restore-keys: |
          ${{ runner.os }}-go-
    - uses: actions/cache@v3
      if: inputs.enable_npm == 'true'
      with:
        path: |
          ~/.npm
        key: ${{ runner.os }}-npm-${{ hashFiles('web/ui/package-lock.json') }}
        restore-keys: |
          ${{ runner.os }}-npm-
    - run: make promu
      shell: bash
      if: inputs.enable_go == 'true'
    - name: Set up QEMU
      uses: docker/setup-qemu-action@v1
      if: inputs.enable_docker_multibuild == 'true'
    - name: Set up buildx
      uses: docker/setup-buildx-action@v1
      if: inputs.enable_docker_multibuild == 'true'
.github/workflows/buf-lint.yml (6 changes)

@@ -1,5 +1,5 @@
 name: buf.build
-on: # yamllint disable-line rule:truthy
+on:
   pull_request:
     paths:
       - ".github/workflows/buf-lint.yml"
@@ -10,7 +10,9 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - uses: bufbuild/buf-setup-action@v1.7.0
+      - uses: bufbuild/buf-setup-action@v1.23.1
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
       - uses: bufbuild/buf-lint-action@v1
         with:
           input: 'prompb'
.github/workflows/buf.yml (7 changes)

@@ -1,5 +1,5 @@
 name: buf.build
-on: # yamllint disable-line rule:truthy
+on:
   push:
     branches:
       - main
@@ -7,9 +7,12 @@ jobs:
   buf:
     name: lint and publish
     runs-on: ubuntu-latest
+    if: github.repository_owner == 'prometheus'
     steps:
       - uses: actions/checkout@v3
-      - uses: bufbuild/buf-setup-action@v1.7.0
+      - uses: bufbuild/buf-setup-action@v1.23.1
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
       - uses: bufbuild/buf-lint-action@v1
         with:
           input: 'prompb'
.github/workflows/ci.yml (49 changes)

@@ -1,8 +1,9 @@
 ---
 name: CI
-on: # yamllint disable-line rule:truthy
+on:
   pull_request:
   push:

 jobs:
   test_go:
     name: Go tests
@@ -10,15 +11,18 @@ jobs:
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     container:
-      image: quay.io/prometheus/golang-builder:1.19-base
+      image: quay.io/prometheus/golang-builder:1.20-base
     steps:
       - uses: actions/checkout@v3
-      - uses: ./.github/actions/setup_environment
+      - uses: prometheus/promci@v0.1.0
+      - uses: ./.github/promci/actions/setup_environment
       - run: make GO_ONLY=1 SKIP_GOLANGCI_LINT=1
       - run: go test ./tsdb/ -test.tsdb-isolation=false
+      - run: go test --tags=stringlabels ./...
+      - run: GOARCH=386 go test ./cmd/prometheus
       - run: make -C documentation/examples/remote_storage
       - run: make -C documentation/examples
-      - uses: ./.github/actions/check_proto
+      - uses: ./.github/promci/actions/check_proto
         with:
           version: "3.15.8"
@@ -28,18 +32,19 @@ jobs:
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     container:
-      image: quay.io/prometheus/golang-builder:1.19-base
+      image: quay.io/prometheus/golang-builder:1.20-base

     steps:
       - uses: actions/checkout@v3
-      - uses: ./.github/actions/setup_environment
+      - uses: prometheus/promci@v0.1.0
+      - uses: ./.github/promci/actions/setup_environment
         with:
           enable_go: false
           enable_npm: true
       - run: make assets-tarball
       - run: make ui-lint
       - run: make ui-test
-      - uses: ./.github/actions/save_artifacts
+      - uses: ./.github/promci/actions/save_artifacts
         with:
           directory: .tarballs
@@ -48,9 +53,9 @@ jobs:
     runs-on: windows-latest
     steps:
       - uses: actions/checkout@v3
-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@v4
         with:
-          go-version: '>=1.19 <1.20'
+          go-version: '>=1.20 <1.21'
       - run: |
           $TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/discovery.*|github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"}
           go test $TestTargets -vet=off -v
@@ -61,7 +66,7 @@ jobs:
     runs-on: ubuntu-latest
     # The go verson in this image should be N-1 wrt test_go.
     container:
-      image: quay.io/prometheus/golang-builder:1.18-base
+      image: quay.io/prometheus/golang-builder:1.19-base
     steps:
       - uses: actions/checkout@v3
       - run: make build
@@ -100,7 +105,8 @@ jobs:
       thread: [ 0, 1, 2 ]
     steps:
       - uses: actions/checkout@v3
-      - uses: ./.github/actions/build
+      - uses: prometheus/promci@v0.1.0
+      - uses: ./.github/promci/actions/build
         with:
           promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"
           parallelism: 3
@@ -122,7 +128,8 @@ jobs:
     # should also be updated.
     steps:
       - uses: actions/checkout@v3
-      - uses: ./.github/actions/build
+      - uses: prometheus/promci@v0.1.0
+      - uses: ./.github/promci/actions/build
         with:
           parallelism: 12
           thread: ${{ matrix.thread }}
@@ -133,16 +140,17 @@ jobs:
       - name: Checkout repository
         uses: actions/checkout@v3
       - name: Install Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
-          go-version: '<1.19'
+          go-version: 1.20.x
       - name: Install snmp_exporter/generator dependencies
         run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
         if: github.repository == 'prometheus/snmp_exporter'
       - name: Lint
-        uses: golangci/golangci-lint-action@v3.2.0
+        uses: golangci/golangci-lint-action@v3.6.0
         with:
-          version: v1.49.0
+          args: --verbose
+          version: v1.53.3
   fuzzing:
     uses: ./.github/workflows/fuzzing.yml
     if: github.event_name == 'pull_request'
@@ -156,7 +164,8 @@ jobs:
     if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
     steps:
       - uses: actions/checkout@v3
-      - uses: ./.github/actions/publish_main
+      - uses: prometheus/promci@v0.1.0
+      - uses: ./.github/promci/actions/publish_main
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}
           docker_hub_password: ${{ secrets.docker_hub_password }}
@@ -169,7 +178,8 @@ jobs:
     if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
     steps:
       - uses: actions/checkout@v3
-      - uses: ./.github/actions/publish_release
+      - uses: prometheus/promci@v0.1.0
+      - uses: ./.github/promci/actions/publish_release
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}
           docker_hub_password: ${{ secrets.docker_hub_password }}
@@ -183,12 +193,13 @@ jobs:
     steps:
       - name: Checkout
         uses: actions/checkout@v3
+      - uses: prometheus/promci@v0.1.0
       - name: Install nodejs
         uses: actions/setup-node@v3
         with:
           node-version-file: "web/ui/.nvmrc"
           registry-url: "https://registry.npmjs.org"
-      - uses: actions/cache@v3.0.9
+      - uses: actions/cache@v3.3.1
         with:
           path: ~/.npm
           key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
.github/workflows/codeql-analysis.yml (6 changes)

@@ -1,7 +1,7 @@
 ---
 name: "CodeQL"

-on: # yamllint disable-line rule:truthy
+on:
   workflow_call:
   schedule:
     - cron: "26 14 * * 1"
@@ -21,9 +21,9 @@ jobs:
     steps:
       - name: Checkout repository
         uses: actions/checkout@v3
-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@v4
         with:
-          go-version: '>=1.19 <1.20'
+          go-version: '>=1.20 <1.21'

       - name: Initialize CodeQL
         uses: github/codeql-action/init@v2
.github/workflows/funcbench.yml (2 changes)

@@ -1,4 +1,4 @@
-on: # yamllint disable-line rule:truthy
+on:
   repository_dispatch:
     types: [funcbench_start]
 name: Funcbench Workflow
.github/workflows/fuzzing.yml (2 changes)

@@ -1,5 +1,5 @@
 name: CIFuzz
-on: # yamllint disable-line rule:truthy
+on:
   workflow_call:
 jobs:
   Fuzzing:
.github/workflows/lock.yml (5 changes)

@@ -1,6 +1,6 @@
 name: 'Lock Threads'

-on: # yamllint disable-line rule:truthy
+on:
   schedule:
     - cron: '13 23 * * *'
   workflow_dispatch:
@@ -14,8 +14,9 @@ concurrency:
 jobs:
   action:
     runs-on: ubuntu-latest
+    if: github.repository_owner == 'prometheus'
     steps:
-      - uses: dessant/lock-threads@v3
+      - uses: dessant/lock-threads@v4
         with:
           process-only: 'issues'
           issue-inactive-days: '180'
.github/workflows/prombench.yml (2 changes)

@@ -1,4 +1,4 @@
-on: # yamllint disable-line rule:truthy
+on:
   repository_dispatch:
     types: [prombench_start, prombench_restart, prombench_stop]
 name: Prombench Workflow
.github/workflows/repo_sync.yml (3 changes)

@@ -1,11 +1,12 @@
 ---
 name: Sync repo files
-on: # yamllint disable-line rule:truthy
+on:
   schedule:
     - cron: '44 17 * * *'
 jobs:
   repo_sync:
     runs-on: ubuntu-latest
+    if: github.repository_owner == 'prometheus'
     container:
       image: quay.io/prometheus/golang-builder
     steps:
.gitignore (5 changes)

@@ -19,7 +19,7 @@ benchmark.txt
 !/.promu.yml
 !/.golangci.yml
 /documentation/examples/remote_storage/remote_storage_adapter/remote_storage_adapter
-/documentation/examples/remote_storage/example_write_adapter/example_writer_adapter
+/documentation/examples/remote_storage/example_write_adapter/example_write_adapter

 npm_licenses.tar.bz2
 /web/ui/static/react
@@ -28,3 +28,6 @@ npm_licenses.tar.bz2
 /.build

 /**/node_modules
+
+# Ignore parser debug
+y.output
.golangci.yml

@@ -1,5 +1,5 @@
 run:
-  deadline: 5m
+  timeout: 15m
   skip-files:
     # Skip autogenerated files.
     - ^.*\.(pb|y)\.go$
@@ -10,31 +10,60 @@ output:
 linters:
   enable:
     - depguard
+    - gocritic
     - gofumpt
     - goimports
-    - revive
     - misspell
+    - predeclared
+    - revive
+    - unconvert
+    - unused

 issues:
   max-same-issues: 0
   exclude-rules:
+    - linters:
+        - gocritic
+      text: "appendAssign"
     - path: _test.go
       linters:
         - errcheck

 linters-settings:
   depguard:
-    list-type: blacklist
-    include-go-root: true
-    packages-with-error-message:
-      - sync/atomic: "Use go.uber.org/atomic instead of sync/atomic"
-      - github.com/stretchr/testify/assert: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert"
-      - github.com/go-kit/kit/log: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
-      - io/ioutil: "Use corresponding 'os' or 'io' functions instead."
-      - regexp: "Use github.com/grafana/regexp instead of regexp"
+    rules:
+      main:
+        deny:
+          - pkg: "sync/atomic"
+            desc: "Use go.uber.org/atomic instead of sync/atomic"
+          - pkg: "github.com/stretchr/testify/assert"
+            desc: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert"
+          - pkg: "github.com/go-kit/kit/log"
+            desc: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
+          - pkg: "io/ioutil"
+            desc: "Use corresponding 'os' or 'io' functions instead."
+          - pkg: "regexp"
+            desc: "Use github.com/grafana/regexp instead of regexp"
   errcheck:
-    exclude: scripts/errcheck_excludes.txt
+    exclude-functions:
+      # Don't flag lines such as "io.Copy(io.Discard, resp.Body)".
+      - io.Copy
+      # The next two are used in HTTP handlers, any error is handled by the server itself.
+      - io.WriteString
+      - (net/http.ResponseWriter).Write
+      # No need to check for errors on server's shutdown.
+      - (*net/http.Server).Shutdown
+      # Never check for logger errors.
+      - (github.com/go-kit/log.Logger).Log
+      # Never check for rollback errors as Rollback() is called when a previous error was detected.
+      - (github.com/prometheus/prometheus/storage.Appender).Rollback
   goimports:
     local-prefixes: github.com/prometheus/prometheus
   gofumpt:
     extra-rules: true
+  revive:
+    rules:
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter
+      - name: unused-parameter
+        severity: warning
+        disabled: true
.promu.yml (12 changes)

@@ -1,7 +1,7 @@
 go:
   # Whenever the Go version is updated here,
   # .circle/config.yml should also be updated.
-  version: 1.19
+  version: 1.20
 repository:
   path: github.com/prometheus/prometheus
 build:
@@ -10,7 +10,15 @@ build:
       path: ./cmd/prometheus
     - name: promtool
       path: ./cmd/promtool
-  flags: -a -tags netgo,builtinassets
+  tags:
+    all:
+      - netgo
+      - builtinassets
+      - stringlabels
+    windows:
+      - builtinassets
+      - stringlabels
+  flags: -a
   ldflags: |
     -X github.com/prometheus/common/version.Version={{.Version}}
     -X github.com/prometheus/common/version.Revision={{.Revision}}
.yamllint

@@ -20,9 +20,4 @@ rules:
       config/testdata/section_key_dup.bad.yml
   line-length: disable
   truthy:
-    ignore: |
-      .github/workflows/codeql-analysis.yml
-      .github/workflows/funcbench.yml
-      .github/workflows/fuzzing.yml
-      .github/workflows/prombench.yml
-      .github/workflows/golangci-lint.yml
+    check-keys: false
CHANGELOG.md (168 additions)

@@ -1,5 +1,173 @@ — the following release notes are added below the `# Changelog` heading:

## 2.45.0 / 2023-06-23

This release is a LTS (Long-Term Support) release of Prometheus and will
receive security, documentation and bugfix patches for at least 12 months.
Please read more about our LTS release cycle at
<https://prometheus.io/docs/introduction/release-cycle/>.

* [FEATURE] API: New limit parameter to limit the number of items returned by `/api/v1/status/tsdb` endpoint. #12336
* [FEATURE] Config: Add limits to global config. #12126
* [FEATURE] Consul SD: Added support for `path_prefix`. #12372
* [FEATURE] Native histograms: Add option to scrape both classic and native histograms. #12350
* [FEATURE] Native histograms: Added support for two more arithmetic operators `avg_over_time` and `sum_over_time`. #12262
* [FEATURE] Promtool: When providing the block id, only one block will be loaded and analyzed. #12031
* [FEATURE] Remote-write: New Azure ad configuration to support remote writing directly to Azure Monitor workspace. #11944
* [FEATURE] TSDB: Samples per chunk are now configurable with flag `storage.tsdb.samples-per-chunk`. By default set to its former value 120. #12055
* [ENHANCEMENT] Native histograms: bucket size can now be limited to avoid scrape fails. #12254
* [ENHANCEMENT] TSDB: Dropped series are now deleted from the WAL sooner. #12297
* [BUGFIX] Native histograms: ChunkSeries iterator now checks if a new sample can be appended to the open chunk. #12185
* [BUGFIX] Native histograms: Fix Histogram Appender `Appendable()` segfault. #12357
* [BUGFIX] Native histograms: Fix setting reset header to gauge histograms in seriesToChunkEncoder. #12329
* [BUGFIX] TSDB: Tombstone intervals are not modified after Get() call. #12245
* [BUGFIX] TSDB: Use path/filepath to set the WAL directory. #12349

## 2.44.0 / 2023-05-13

This version is built with Go tag `stringlabels`, to use the smaller data
structure for Labels that was optional in the previous release. For more
details about this code change see #10991.

* [CHANGE] Remote-write: Raise default samples per send to 2,000. #12203
* [FEATURE] Remote-read: Handle native histograms. #12085, #12192
* [FEATURE] Promtool: Health and readiness check of prometheus server in CLI. #12096
* [FEATURE] PromQL: Add `query_samples_total` metric, the total number of samples loaded by all queries. #12251
* [ENHANCEMENT] Storage: Optimise buffer used to iterate through samples. #12326
* [ENHANCEMENT] Scrape: Reduce memory allocations on target labels. #12084
* [ENHANCEMENT] PromQL: Use faster heap method for `topk()` / `bottomk()`. #12190
* [ENHANCEMENT] Rules API: Allow filtering by rule name. #12270
* [ENHANCEMENT] Native Histograms: Various fixes and improvements. #11687, #12264, #12272
* [ENHANCEMENT] UI: Search of scraping pools is now case-insensitive. #12207
* [ENHANCEMENT] TSDB: Add an affirmative log message for successful WAL repair. #12135
* [BUGFIX] TSDB: Block compaction failed when shutting down. #12179
* [BUGFIX] TSDB: Out-of-order chunks could be ignored if the write-behind log was deleted. #12127

## 2.43.1 / 2023-05-03

* [BUGFIX] Labels: `Set()` after `Del()` would be ignored, which broke some relabeling rules. #12322

## 2.43.0 / 2023-03-21

We are working on some performance improvements in Prometheus, which are only
built into Prometheus when compiling it using the Go tag `stringlabels`
(therefore they are not shipped in the default binaries). It uses a data
structure for labels that uses a single string to hold all the label/values,
resulting in a smaller heap size and some speedups in most cases. We would like
to encourage users who are interested in these improvements to help us measure
the gains on their production architecture. We are providing release artefacts
`2.43.0+stringlabels` and Docker images tagged `v2.43.0-stringlabels` with those
improvements for testing. #10991
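Editorial aside (not part of the CHANGELOG diff): a toy Go sketch of the general idea behind the `stringlabels` layout described above — all label names and values packed into one string instead of a slice of structs, so a label set is a single allocation. This is an illustration only, not Prometheus's actual implementation; the single-byte length prefix and the helper names are made up for the example.

```go
// Toy illustration of packing label name/value pairs into one string.
// NOT Prometheus code: a real implementation uses a different encoding.
package main

import "fmt"

// toyLabels holds name/value pairs back to back, each prefixed by one
// length byte (assumes every element is shorter than 256 bytes).
type toyLabels struct{ data string }

func makeToyLabels(pairs ...string) toyLabels {
	var b []byte
	for _, s := range pairs {
		b = append(b, byte(len(s)))
		b = append(b, s...)
	}
	return toyLabels{data: string(b)}
}

// get walks the packed string and returns the value for name, or "".
func (ls toyLabels) get(name string) string {
	for i := 0; i < len(ls.data); {
		n := int(ls.data[i])
		key := ls.data[i+1 : i+1+n]
		i += 1 + n
		v := int(ls.data[i])
		val := ls.data[i+1 : i+1+v]
		i += 1 + v
		if key == name {
			return val
		}
	}
	return ""
}

func main() {
	ls := makeToyLabels("__name__", "http_requests_total", "job", "api")
	fmt.Println(ls.get("job")) // prints: api
}
```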
* [FEATURE] Promtool: Add HTTP client configuration to query commands. #11487
* [FEATURE] Scrape: Add `scrape_config_files` to include scrape configs from different files. #12019
* [FEATURE] HTTP client: Add `no_proxy` to exclude URLs from proxied requests. #12098
* [FEATURE] HTTP client: Add `proxy_from_environment` to read proxies from env variables. #12098
* [ENHANCEMENT] API: Add support for setting lookback delta per query via the API. #12088
* [ENHANCEMENT] API: Change HTTP status code from 503/422 to 499 if a request is canceled. #11897
* [ENHANCEMENT] Scrape: Allow exemplars for all metric types. #11984
* [ENHANCEMENT] TSDB: Add metrics for head chunks and WAL folders size. #12013
* [ENHANCEMENT] TSDB: Automatically remove incorrect snapshot with index that is ahead of WAL. #11859
* [ENHANCEMENT] TSDB: Improve Prometheus parser error outputs to be more comprehensible. #11682
* [ENHANCEMENT] UI: Scope `group by` labels to metric in autocompletion. #11914
* [BUGFIX] Scrape: Fix `prometheus_target_scrape_pool_target_limit` metric not set before reloading. #12002
* [BUGFIX] TSDB: Correctly update `prometheus_tsdb_head_chunks_removed_total` and `prometheus_tsdb_head_chunks` metrics when reading WAL. #11858
* [BUGFIX] TSDB: Use the correct unit (seconds) when recording out-of-order append deltas in the `prometheus_tsdb_sample_ooo_delta` metric. #12004

## 2.42.0 / 2023-01-31

This release comes with a bunch of feature coverage for native histograms and breaking changes.

If you are trying native histograms already, we recommend you remove the `wal` directory when upgrading.
Because the old WAL record for native histograms is not backward compatible in v2.42.0, this will lead to some data loss for the latest data.

Additionally, if you scrape "float histograms" or use recording rules on native histograms in v2.42.0 (which writes float histograms),
it is a one-way street since older versions do not support float histograms.

* [CHANGE] **breaking** TSDB: Changed WAL record format for the experimental native histograms. #11783
* [FEATURE] Add 'keep_firing_for' field to alerting rules. #11827
* [FEATURE] Promtool: Add support of selecting timeseries for TSDB dump. #11872
* [ENHANCEMENT] Agent: Native histogram support. #11842
* [ENHANCEMENT] Rules: Support native histograms in recording rules. #11838
* [ENHANCEMENT] SD: Add container ID as a meta label for pod targets for Kubernetes. #11844
* [ENHANCEMENT] SD: Add VM size label to azure service discovery. #11650
* [ENHANCEMENT] Support native histograms in federation. #11830
* [ENHANCEMENT] TSDB: Add gauge histogram support. #11783 #11840 #11814
* [ENHANCEMENT] TSDB/Scrape: Support FloatHistogram that represents buckets as float64 values. #11522 #11817 #11716
* [ENHANCEMENT] UI: Show individual scrape pools on /targets page. #11142

## 2.41.0 / 2022-12-20

* [FEATURE] Relabeling: Add `keepequal` and `dropequal` relabel actions. #11564
* [FEATURE] Add support for HTTP proxy headers. #11712
* [ENHANCEMENT] Reload private certificates when changed on disk. #11685
* [ENHANCEMENT] Add `max_version` to specify maximum TLS version in `tls_config`. #11685
* [ENHANCEMENT] Add `goos` and `goarch` labels to `prometheus_build_info`. #11685
* [ENHANCEMENT] SD: Add proxy support for EC2 and LightSail SDs #11611
* [ENHANCEMENT] SD: Add new metric `prometheus_sd_file_watcher_errors_total`. #11066
* [ENHANCEMENT] Remote Read: Use a pool to speed up marshalling. #11357
* [ENHANCEMENT] TSDB: Improve handling of tombstoned chunks in iterators. #11632
* [ENHANCEMENT] TSDB: Optimize postings offset table reading. #11535
* [BUGFIX] Scrape: Validate the metric name, label names, and label values after relabeling. #11074
* [BUGFIX] Remote Write receiver and rule manager: Fix error handling. #11727

## 2.40.7 / 2022-12-14

* [BUGFIX] Use Windows native DNS resolver. #11704
* [BUGFIX] TSDB: Fix queries involving negative buckets of native histograms. #11699

## 2.40.6 / 2022-12-09

* [SECURITY] Security upgrade from go and upstream dependencies that include
  security fixes to the net/http and os packages. #11691

## 2.40.5 / 2022-12-01

* [BUGFIX] TSDB: Fix queries involving native histograms due to improper reset of iterators. #11643

## 2.40.4 / 2022-11-29

* [SECURITY] Fix basic authentication bypass vulnerability (CVE-2022-46146). GHSA-4v48-4q5m-8vx4

## 2.40.3 / 2022-11-23

* [BUGFIX] TSDB: Fix compaction after a deletion is called. #11623

## 2.40.2 / 2022-11-16

* [BUGFIX] UI: Fix black-on-black metric name color in dark mode. #11572

## 2.40.1 / 2022-11-09

* [BUGFIX] TSDB: Fix alignment for atomic int64 for 32 bit architecture. #11547
* [BUGFIX] Scrape: Fix accept headers. #11552

## 2.40.0 / 2022-11-08

This release introduces an experimental, native way of representing and storing histograms.

It can be enabled in Prometheus via `--enable-feature=native-histograms` to accept native histograms.
Enabling native histograms will also switch the preferred exposition format to protobuf.

To instrument your application with native histograms, use the `main` branch of `client_golang` (this will change for the final release when v1.14.0 of client_golang will be out), and set the `NativeHistogramBucketFactor` in your `HistogramOpts` (`1.1` is a good starting point).
Your existing histograms won't switch to native histograms until `NativeHistogramBucketFactor` is set.
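Editorial aside (not part of the CHANGELOG diff): a minimal client_golang sketch of the opt-in described above. The metric name is made up, `1.1` mirrors the suggested starting bucket factor, and this assumes a client_golang version with native-histogram support (the `NativeHistogramBucketFactor` field of `HistogramOpts`).

```go
// Minimal sketch: opting a histogram into native histograms with client_golang.
// The metric name and observed value are example values.
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var requestDuration = promauto.NewHistogram(prometheus.HistogramOpts{
	Name:    "myapp_request_duration_seconds",
	Help:    "Request duration in seconds.",
	Buckets: prometheus.DefBuckets, // classic buckets are still exposed
	// Setting a bucket factor is what makes the histogram also collect a
	// native histogram; 1.1 is the starting point suggested above.
	NativeHistogramBucketFactor: 1.1,
})

func main() {
	requestDuration.Observe(0.042)
}
```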
* [FEATURE] Add **experimental** support for native histograms. Enable with the flag `--enable-feature=native-histograms`. #11447
* [FEATURE] SD: Add service discovery for OVHcloud. #10802
* [ENHANCEMENT] Kubernetes SD: Use protobuf encoding. #11353
* [ENHANCEMENT] TSDB: Use golang.org/x/exp/slices for improved sorting speed. #11054 #11318 #11380
* [ENHANCEMENT] Consul SD: Add enterprise admin partitions. Adds `__meta_consul_partition` label. Adds `partition` config in `consul_sd_config`. #11482
* [BUGFIX] API: Fix API error codes for `/api/v1/labels` and `/api/v1/series`. #11356

## 2.39.2 / 2022-11-09

* [BUGFIX] TSDB: Fix alignment for atomic int64 for 32 bit architecture. #11547

## 2.39.1 / 2022-10-07

* [BUGFIX] Rules: Fix notifier relabel changing the labels on active alerts. #11427

The existing entry below is unchanged context:

## 2.39.0 / 2022-10-05

* [FEATURE] **experimental** TSDB: Add support for ingesting out-of-order samples. This is configured via `out_of_order_time_window` field in the config file; check config file docs for more info. #11075
CONTRIBUTING.md

@@ -64,10 +64,10 @@ To add or update a new dependency, use the `go get` command:

 ```bash
 # Pick the latest tagged release.
-go install example.com/some/module/pkg@latest
+go get example.com/some/module/pkg@latest

 # Pick a specific version.
-go install example.com/some/module/pkg@vX.Y.Z
+go get example.com/some/module/pkg@vX.Y.Z
 ```

 Tidy up the `go.mod` and `go.sum` files:
@@ -78,3 +78,20 @@ GO111MODULE=on go mod tidy
 ```

 You have to commit the changes to `go.mod` and `go.sum` before submitting the pull request.
+
+## Working with the PromQL parser
+
+The PromQL parser grammar is located in `promql/parser/generated_parser.y` and it can be built using `make parser`.
+The parser is built using [goyacc](https://pkg.go.dev/golang.org/x/tools/cmd/goyacc)
+
+If doing some sort of debugging, then it is possible to add some verbose output. After generating the parser, then you
+can modify the `./promql/parser/generated_parser.y.go` manually.
+
+```golang
+// As of writing this was somewhere around line 600.
+var (
+	yyDebug        = 0     // This can be be a number 0 -> 5.
+	yyErrorVerbose = false // This can be set to true.
+)
+
+```
MAINTAINERS.md

@@ -10,7 +10,7 @@ Julien Pivotto (<roidelapluie@prometheus.io> / @roidelapluie) and Levi Harrison
 * `prometheus-mixin`: Björn Rabenstein (<beorn@grafana.com> / @beorn7)
 * `storage`
   * `remote`: Chris Marchbanks (<csmarchbanks@gmail.com> / @csmarchbanks), Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (<tom.wilkie@gmail.com> / @tomwilkie)
-* `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka)
+* `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
 * `agent`: Robert Fratto (<robert.fratto@grafana.com> / @rfratto)
 * `web`
   * `ui`: Julius Volz (<julius.volz@gmail.com> / @juliusv)
Makefile (24 changes)

@@ -78,6 +78,17 @@ assets-tarball: assets
 	@echo '>> packaging assets'
 	scripts/package_assets.sh

+# We only want to generate the parser when there's changes to the grammar.
+.PHONY: parser
+parser:
+	@echo ">> running goyacc to generate the .go file."
+ifeq (, $(shell command -v goyacc > /dev/null))
+	@echo "goyacc not installed so skipping"
+	@echo "To install: go install golang.org/x/tools/cmd/goyacc@v0.6.0"
+else
+	goyacc -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y
+endif
+
 .PHONY: test
 # If we only want to only test go code we have to change the test target
 # which is called by all.
@@ -90,8 +101,10 @@ endif
 .PHONY: npm_licenses
 npm_licenses: ui-install
 	@echo ">> bundling npm licenses"
-	rm -f $(REACT_APP_NPM_LICENSES_TARBALL)
-	find $(UI_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --transform 's/^/npm_licenses\//' --files-from=-
+	rm -f $(REACT_APP_NPM_LICENSES_TARBALL) npm_licenses
+	ln -s . npm_licenses
+	find npm_licenses/$(UI_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --files-from=-
+	rm -f npm_licenses

 .PHONY: tarball
 tarball: npm_licenses common-tarball
@@ -107,7 +120,7 @@ plugins/plugins.go: plugins.yml plugins/generate.go
 plugins: plugins/plugins.go

 .PHONY: build
-build: assets npm_licenses assets-compress common-build plugins
+build: assets npm_licenses assets-compress plugins common-build

 .PHONY: bench_tsdb
 bench_tsdb: $(PROMU)
@@ -120,3 +133,8 @@ bench_tsdb: $(PROMU)
 	@$(GO) tool pprof --alloc_space -svg $(PROMTOOL) $(TSDB_BENCHMARK_OUTPUT_DIR)/mem.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/memprof.alloc.svg
 	@$(GO) tool pprof -svg $(PROMTOOL) $(TSDB_BENCHMARK_OUTPUT_DIR)/block.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/blockprof.svg
 	@$(GO) tool pprof -svg $(PROMTOOL) $(TSDB_BENCHMARK_OUTPUT_DIR)/mutex.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/mutexprof.svg
+
+.PHONY: cli-documentation
+cli-documentation:
+	$(GO) run ./cmd/prometheus/ --write-documentation > docs/command-line/prometheus.md
+	$(GO) run ./cmd/promtool/ write-documentation > docs/command-line/promtool.md
@@ -49,19 +49,19 @@ endif
 GOTEST := $(GO) test
 GOTEST_DIR :=
 ifneq ($(CIRCLE_JOB),)
-ifneq ($(shell which gotestsum),)
+ifneq ($(shell command -v gotestsum > /dev/null),)
 	GOTEST_DIR := test-results
 	GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
 endif
 endif
 
-PROMU_VERSION ?= 0.13.0
+PROMU_VERSION ?= 0.15.0
 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
 
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.49.0
+GOLANGCI_LINT_VERSION ?= v1.53.3
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -91,6 +91,8 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
 PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
 TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
 
+SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
+
 ifeq ($(GOHOSTARCH),amd64)
 	ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
 		# Only supported on amd64
@@ -176,7 +178,7 @@ endif
 .PHONY: common-yamllint
 common-yamllint:
 	@echo ">> running yamllint on all YAML files in the repository"
-ifeq (, $(shell which yamllint))
+ifeq (, $(shell command -v yamllint > /dev/null))
 	@echo "yamllint not installed so skipping"
 else
 	yamllint .
@@ -205,7 +207,7 @@ common-tarball: promu
 .PHONY: common-docker $(BUILD_DOCKER_ARCHS)
 common-docker: $(BUILD_DOCKER_ARCHS)
 $(BUILD_DOCKER_ARCHS): common-docker-%:
-	docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
+	docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
 		-f $(DOCKERFILE_PATH) \
 		--build-arg ARCH="$*" \
 		--build-arg OS="linux" \
@@ -214,19 +216,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%:
 .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
 common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
 $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
-	docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
+	docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
 
 DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
 .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
 common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
 $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
-	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
+	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
-	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
+	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
 
 .PHONY: common-docker-manifest
 common-docker-manifest:
-	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
+	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
-	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"
+	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
 
 .PHONY: promu
 promu: $(PROMU)
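Note: Docker image tags may not contain `+`, so build metadata in a semver-style version string has to be rewritten before it can be used as a tag; the new `SANITIZED_DOCKER_IMAGE_TAG` variable above does exactly that with `$(subst +,-,...)`. As a rough illustration only (not part of this commit; the example tag below is hypothetical), the same substitution expressed in Go:

```go
package main

import (
	"fmt"
	"strings"
)

// sanitizeDockerTag mirrors the Makefile's $(subst +,-,$(DOCKER_IMAGE_TAG)):
// every "+" in the tag is replaced with "-", since Docker tags only allow
// letters, digits, "_", ".", and "-".
func sanitizeDockerTag(tag string) string {
	return strings.ReplaceAll(tag, "+", "-")
}

func main() {
	// Hypothetical tag with build metadata, not taken from this commit.
	fmt.Println(sanitizeDockerTag("v2.45.0+stringlabels")) // v2.45.0-stringlabels
}
```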
20	README.md
@@ -1,6 +1,13 @@
-# Prometheus
+<h1 align="center" style="border-bottom: none">
+    <a href="//prometheus.io" target="_blank"><img alt="Prometheus" src="/documentation/images/prometheus-logo.svg"></a><br>Prometheus
+</h1>
 
-[][circleci]
+<p align="center">Visit <a href="//prometheus.io" target="_blank">prometheus.io</a> for the full documentation,
+examples and guides.</p>
+
+<div align="center">
+
+[](https://github.com/prometheus/prometheus/actions/workflows/ci.yml)
 [][quay]
 [][hub]
 [](https://goreportcard.com/report/github.com/prometheus/prometheus)
@@ -8,8 +15,7 @@
 [](https://gitpod.io/#https://github.com/prometheus/prometheus)
 [](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:prometheus)
 
-Visit [prometheus.io](https://prometheus.io) for the full documentation,
-examples and guides.
+</div>
 
 Prometheus, a [Cloud Native Computing Foundation](https://cncf.io/) project, is a systems and service monitoring system. It collects metrics
 from configured targets at given intervals, evaluates rule expressions,
@@ -28,7 +34,7 @@ The features that distinguish Prometheus from other metrics and monitoring syste
 
 ## Architecture overview
 
 [Architecture overview diagram — image reference not recoverable from this render]
 
 ## Install
 
@@ -132,8 +138,6 @@ make npm_licenses
 make common-docker-amd64
 ```
 
-*NB* if you are on a Mac, you will need [gnu-tar](https://formulae.brew.sh/formula/gnu-tar).
-
 ## Using Prometheus as a Go Library
 
 ### Remote Write
@@ -172,7 +176,6 @@ For more information on building, running, and developing on the React-based UI,
 ## More information
 
 * Godoc documentation is available via [pkg.go.dev](https://pkg.go.dev/github.com/prometheus/prometheus). Due to peculiarities of Go Modules, v2.x.y will be displayed as v0.x.y.
-* You will find a CircleCI configuration in [`.circleci/config.yml`](.circleci/config.yml).
 * See the [Community page](https://prometheus.io/community) for how to reach the Prometheus developers and users on various communication channels.
 
 ## Contributing
@@ -184,5 +187,4 @@ Refer to [CONTRIBUTING.md](https://github.com/prometheus/prometheus/blob/main/CO
 Apache License 2.0, see [LICENSE](https://github.com/prometheus/prometheus/blob/main/LICENSE).
 
 [hub]: https://hub.docker.com/r/prom/prometheus/
-[circleci]: https://circleci.com/gh/prometheus/prometheus
 [quay]: https://quay.io/repository/prometheus/prometheus
11	RELEASE.md
@@ -44,8 +44,15 @@ Release cadence of first pre-releases being cut is 6 weeks.
 | v2.37 LTS | 2022-06-29 | Julien Pivotto (GitHub: @roidelapluie) |
 | v2.38     | 2022-08-10 | Julius Volz (GitHub: @juliusv)         |
 | v2.39     | 2022-09-21 | Ganesh Vernekar (GitHub: @codesome)    |
-| v2.40     | 2022-11-02 | **searching for volunteer**            |
-| v2.41     | 2022-12-14 | **searching for volunteer**            |
+| v2.40     | 2022-11-02 | Ganesh Vernekar (GitHub: @codesome)    |
+| v2.41     | 2022-12-14 | Julien Pivotto (GitHub: @roidelapluie) |
+| v2.42     | 2023-01-25 | Kemal Akkoyun (GitHub: @kakkoyun)      |
+| v2.43     | 2023-03-08 | Julien Pivotto (GitHub: @roidelapluie) |
+| v2.44     | 2023-04-19 | Bryan Boreham (GitHub: @bboreham)      |
+| v2.45 LTS | 2023-05-31 | Jesus Vazquez (Github: @jesusvazquez)  |
+| v2.46     | 2023-07-12 | Julien Pivotto (GitHub: @roidelapluie) |
+| v2.47     | 2023-08-23 | **searching for volunteer**            |
+| v2.48     | 2023-10-04 | **searching for volunteer**            |
 
 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
@@ -12,6 +12,7 @@
 // limitations under the License.
 
 // The main package for the Prometheus server executable.
+// nolint:revive // Many unsued function arguments in this file by design.
 package main
 
 import (
@@ -33,6 +34,7 @@ import (
 	"syscall"
 	"time"
 
+	"github.com/alecthomas/kingpin/v2"
 	"github.com/alecthomas/units"
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
@@ -45,10 +47,8 @@ import (
 	promlogflag "github.com/prometheus/common/promlog/flag"
 	"github.com/prometheus/common/version"
 	toolkit_web "github.com/prometheus/exporter-toolkit/web"
-	toolkit_webflag "github.com/prometheus/exporter-toolkit/web/kingpinflag"
 	"go.uber.org/atomic"
 	"go.uber.org/automaxprocs/maxprocs"
-	"gopkg.in/alecthomas/kingpin.v2"
 	"k8s.io/klog"
 	klogv2 "k8s.io/klog/v2"
@@ -57,6 +57,7 @@ import (
 	"github.com/prometheus/prometheus/discovery/legacymanager"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/metadata"
 	"github.com/prometheus/prometheus/model/relabel"
@@ -70,9 +71,10 @@ import (
 	"github.com/prometheus/prometheus/tracing"
 	"github.com/prometheus/prometheus/tsdb"
 	"github.com/prometheus/prometheus/tsdb/agent"
+	"github.com/prometheus/prometheus/tsdb/wlog"
+	"github.com/prometheus/prometheus/util/documentcli"
 	"github.com/prometheus/prometheus/util/logging"
 	prom_runtime "github.com/prometheus/prometheus/util/runtime"
-	"github.com/prometheus/prometheus/util/strutil"
 	"github.com/prometheus/prometheus/web"
 )
@@ -195,6 +197,10 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 		case "no-default-scrape-port":
 			c.scrape.NoDefaultPort = true
 			level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.")
+		case "native-histograms":
+			c.tsdb.EnableNativeHistograms = true
+			c.scrape.EnableProtobufNegotiation = true
+			level.Info(logger).Log("msg", "Experimental native histogram support enabled.")
 		case "":
 			continue
 		case "promql-at-modifier", "promql-negative-offset":
@@ -204,6 +210,12 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
 			}
 		}
 	}
 
+	if c.tsdb.EnableNativeHistograms && c.tsdb.EnableMemorySnapshotOnShutdown {
+		c.tsdb.EnableMemorySnapshotOnShutdown = false
+		level.Warn(logger).Log("msg", "memory-snapshot-on-shutdown has been disabled automatically because memory-snapshot-on-shutdown and native-histograms cannot be enabled at the same time.")
+	}
+
 	return nil
 }
@@ -241,7 +253,10 @@ func main() {
 	a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry.").
 		Default("0.0.0.0:9090").StringVar(&cfg.web.ListenAddress)
 
-	webConfig := toolkit_webflag.AddFlags(a)
+	webConfig := a.Flag(
+		"web.config.file",
+		"[EXPERIMENTAL] Path to configuration file that can enable TLS or authentication.",
+	).Default("").String()
 
 	a.Flag("web.read-timeout",
 		"Maximum duration before timing out read of the request, and closing idle connections.").
@@ -320,9 +335,15 @@ func main() {
 	serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL.").
 		Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression)
 
+	serverOnlyFlag(a, "storage.tsdb.wal-compression-type", "Compression algorithm for the tsdb WAL.").
+		Hidden().Default(string(wlog.CompressionSnappy)).EnumVar(&cfg.tsdb.WALCompressionType, string(wlog.CompressionSnappy), string(wlog.CompressionZstd))
+
 	serverOnlyFlag(a, "storage.tsdb.head-chunks-write-queue-size", "Size of the queue through which head chunks are written to the disk to be m-mapped, 0 disables the queue completely. Experimental.").
 		Default("0").IntVar(&cfg.tsdb.HeadChunksWriteQueueSize)
 
+	serverOnlyFlag(a, "storage.tsdb.samples-per-chunk", "Target number of samples per chunk.").
+		Default("120").Hidden().IntVar(&cfg.tsdb.SamplesPerChunk)
+
 	agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage.").
 		Default("data-agent/").StringVar(&cfg.agentStoragePath)
@@ -333,6 +354,9 @@ func main() {
 	agentOnlyFlag(a, "storage.agent.wal-compression", "Compress the agent WAL.").
 		Default("true").BoolVar(&cfg.agent.WALCompression)
 
+	agentOnlyFlag(a, "storage.agent.wal-compression-type", "Compression algorithm for the agent WAL.").
+		Hidden().Default(string(wlog.CompressionSnappy)).EnumVar(&cfg.agent.WALCompressionType, string(wlog.CompressionSnappy), string(wlog.CompressionZstd))
+
 	agentOnlyFlag(a, "storage.agent.wal-truncate-frequency",
 		"The frequency at which to truncate the WAL and remove old data.").
 		Hidden().PlaceHolder("<duration>").SetValue(&cfg.agent.TruncateFrequency)
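Note: the WAL compression setting is now split across two flags — the existing boolean `wal-compression` switch and a hidden `wal-compression-type` enum (snappy or zstd) — and the two are combined later through `wlog.ParseCompressionType` (see the ToTSDBOptions/ToAgentOptions hunks further down). A minimal sketch of what such a helper plausibly does; this is an assumption about its behaviour, not the actual implementation:

```go
package wlogsketch

// parseCompressionType sketches the assumed semantics of combining the legacy
// boolean with the new type enum: when compression is disabled the type is
// ignored entirely, otherwise the chosen algorithm name is passed through.
func parseCompressionType(compress bool, typ string) string {
	if !compress {
		return "none"
	}
	return typ // "snappy" or "zstd"
}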
@@ -396,14 +420,23 @@ func main() {
 	a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
 		Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)
 
-	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
 		Default("").StringsVar(&cfg.featureList)
 
 	promlogflag.AddFlags(a, &cfg.promlogConfig)
 
+	a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error {
+		if err := documentcli.GenerateMarkdown(a.Model(), os.Stdout); err != nil {
+			os.Exit(1)
+			return err
+		}
+		os.Exit(0)
+		return nil
+	}).Bool()
+
 	_, err := a.Parse(os.Args[1:])
 	if err != nil {
-		fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing commandline arguments: %w", err))
+		fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing command line arguments: %w", err))
 		a.Usage(os.Args[1:])
 		os.Exit(2)
 	}
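Note: the hidden `--write-documentation` flag above uses a kingpin `Action` callback to dump Markdown for the whole flag model (via `documentcli.GenerateMarkdown`) and exit before any normal command handling runs; this is what the new `make cli-documentation` target relies on. A stripped-down sketch of the same pattern, built around a hypothetical application rather than Prometheus' real flag set:

```go
package main

import (
	"fmt"
	"os"

	"github.com/alecthomas/kingpin/v2"
)

func main() {
	a := kingpin.New("example", "Hypothetical app demonstrating a hidden docs flag.")
	a.Flag("verbose", "Enable verbose output.").Bool()

	// Hidden flag: when present, print documentation for all registered flags
	// and exit before the rest of main runs.
	a.Flag("write-documentation", "Generate command line documentation. Internal use.").
		Hidden().Action(func(ctx *kingpin.ParseContext) error {
		// Prometheus feeds a.Model() into documentcli.GenerateMarkdown here;
		// this sketch only prints a placeholder heading.
		fmt.Println("# example command line")
		os.Exit(0)
		return nil
	}).Bool()

	if _, err := a.Parse(os.Args[1:]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(2)
	}
}
```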
@@ -456,11 +489,19 @@ func main() {
 		level.Error(logger).Log("msg", fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "file", absPath, "err", err)
 		os.Exit(2)
 	}
+	if _, err := cfgFile.GetScrapeConfigs(); err != nil {
+		absPath, pathErr := filepath.Abs(cfg.configFile)
+		if pathErr != nil {
+			absPath = cfg.configFile
+		}
+		level.Error(logger).Log("msg", fmt.Sprintf("Error loading scrape config files from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err)
+		os.Exit(2)
+	}
 	if cfg.tsdb.EnableExemplarStorage {
 		if cfgFile.StorageConfig.ExemplarsConfig == nil {
 			cfgFile.StorageConfig.ExemplarsConfig = &config.DefaultExemplarsConfig
 		}
-		cfg.tsdb.MaxExemplars = int64(cfgFile.StorageConfig.ExemplarsConfig.MaxExemplars)
+		cfg.tsdb.MaxExemplars = cfgFile.StorageConfig.ExemplarsConfig.MaxExemplars
 	}
 	if cfgFile.StorageConfig.TSDBConfig != nil {
 		cfg.tsdb.OutOfOrderTimeWindow = cfgFile.StorageConfig.TSDBConfig.OutOfOrderTimeWindow
|
||||||
Appendable: fanoutStorage,
|
Appendable: fanoutStorage,
|
||||||
Queryable: localStorage,
|
Queryable: localStorage,
|
||||||
QueryFunc: rules.EngineQueryFunc(queryEngine, fanoutStorage),
|
QueryFunc: rules.EngineQueryFunc(queryEngine, fanoutStorage),
|
||||||
NotifyFunc: sendAlerts(notifierManager, cfg.web.ExternalURL.String()),
|
NotifyFunc: rules.SendAlerts(notifierManager, cfg.web.ExternalURL.String()),
|
||||||
Context: ctxRule,
|
Context: ctxRule,
|
||||||
ExternalURL: cfg.web.ExternalURL,
|
ExternalURL: cfg.web.ExternalURL,
|
||||||
Registerer: prometheus.DefaultRegisterer,
|
Registerer: prometheus.DefaultRegisterer,
|
||||||
|
@ -718,7 +759,11 @@ func main() {
|
||||||
name: "scrape_sd",
|
name: "scrape_sd",
|
||||||
reloader: func(cfg *config.Config) error {
|
reloader: func(cfg *config.Config) error {
|
||||||
c := make(map[string]discovery.Configs)
|
c := make(map[string]discovery.Configs)
|
||||||
for _, v := range cfg.ScrapeConfigs {
|
scfgs, err := cfg.GetScrapeConfigs()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, v := range scfgs {
|
||||||
c[v.JobName] = v.ServiceDiscoveryConfigs
|
c[v.JobName] = v.ServiceDiscoveryConfigs
|
||||||
}
|
}
|
||||||
return discoveryManagerScrape.ApplyConfig(c)
|
return discoveryManagerScrape.ApplyConfig(c)
|
||||||
|
@@ -1015,6 +1060,7 @@ func main() {
 
 				startTimeMargin := int64(2 * time.Duration(cfg.tsdb.MinBlockDuration).Seconds() * 1000)
 				localStorage.Set(db, startTimeMargin)
+				db.SetWriteNotified(remoteStorage)
 				close(dbOpen)
 				<-cancel
 				return nil
@@ -1270,36 +1316,6 @@ func computeExternalURL(u, listenAddr string) (*url.URL, error) {
 	return eu, nil
 }
 
-type sender interface {
-	Send(alerts ...*notifier.Alert)
-}
-
-// sendAlerts implements the rules.NotifyFunc for a Notifier.
-func sendAlerts(s sender, externalURL string) rules.NotifyFunc {
-	return func(ctx context.Context, expr string, alerts ...*rules.Alert) {
-		var res []*notifier.Alert
-
-		for _, alert := range alerts {
-			a := &notifier.Alert{
-				StartsAt:     alert.FiredAt,
-				Labels:       alert.Labels,
-				Annotations:  alert.Annotations,
-				GeneratorURL: externalURL + strutil.TableLinkForExpression(expr),
-			}
-			if !alert.ResolvedAt.IsZero() {
-				a.EndsAt = alert.ResolvedAt
-			} else {
-				a.EndsAt = alert.ValidUntil
-			}
-			res = append(res, a)
-		}
-
-		if len(alerts) > 0 {
-			s.Send(res...)
-		}
-	}
-}
-
 // readyStorage implements the Storage interface while allowing to set the actual
 // storage at a later point in time.
 type readyStorage struct {
@@ -1411,6 +1427,10 @@ func (n notReadyAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels,
 	return 0, tsdb.ErrNotReady
 }
 
+func (n notReadyAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
+	return 0, tsdb.ErrNotReady
+}
+
 func (n notReadyAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
 	return 0, tsdb.ErrNotReady
 }
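Note: the appender interface gained an `AppendHistogram` method as part of native-histogram support, so the not-ready stub has to implement it too or it would no longer satisfy `storage.Appender`. A common way to catch a missing method at compile time — a general Go idiom, not something this diff adds — is a blank-identifier assertion placed next to the type:

```go
// Compile-time check that the stub still implements storage.Appender.
// If a newly added interface method such as AppendHistogram is missing,
// this declaration fails to build.
var _ storage.Appender = notReadyAppender{}
```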
@@ -1473,11 +1493,11 @@ func (s *readyStorage) Snapshot(dir string, withHead bool) error {
 }
 
 // Stats implements the api_v1.TSDBAdminStats interface.
-func (s *readyStorage) Stats(statsByLabelName string) (*tsdb.Stats, error) {
+func (s *readyStorage) Stats(statsByLabelName string, limit int) (*tsdb.Stats, error) {
 	if x := s.get(); x != nil {
 		switch db := x.(type) {
 		case *tsdb.DB:
-			return db.Head().Stats(statsByLabelName), nil
+			return db.Head().Stats(statsByLabelName, limit), nil
 		case *agent.DB:
 			return nil, agent.ErrUnsupported
 		default:
|
||||||
MaxBytes units.Base2Bytes
|
MaxBytes units.Base2Bytes
|
||||||
NoLockfile bool
|
NoLockfile bool
|
||||||
WALCompression bool
|
WALCompression bool
|
||||||
|
WALCompressionType string
|
||||||
HeadChunksWriteQueueSize int
|
HeadChunksWriteQueueSize int
|
||||||
|
SamplesPerChunk int
|
||||||
StripeSize int
|
StripeSize int
|
||||||
MinBlockDuration model.Duration
|
MinBlockDuration model.Duration
|
||||||
MaxBlockDuration model.Duration
|
MaxBlockDuration model.Duration
|
||||||
|
@ -1541,6 +1563,7 @@ type tsdbOptions struct {
|
||||||
EnableExemplarStorage bool
|
EnableExemplarStorage bool
|
||||||
MaxExemplars int64
|
MaxExemplars int64
|
||||||
EnableMemorySnapshotOnShutdown bool
|
EnableMemorySnapshotOnShutdown bool
|
||||||
|
EnableNativeHistograms bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
|
func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
|
||||||
|
@ -1551,14 +1574,16 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
|
||||||
MaxBytes: int64(opts.MaxBytes),
|
MaxBytes: int64(opts.MaxBytes),
|
||||||
NoLockfile: opts.NoLockfile,
|
NoLockfile: opts.NoLockfile,
|
||||||
AllowOverlappingCompaction: true,
|
AllowOverlappingCompaction: true,
|
||||||
WALCompression: opts.WALCompression,
|
WALCompression: wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType),
|
||||||
HeadChunksWriteQueueSize: opts.HeadChunksWriteQueueSize,
|
HeadChunksWriteQueueSize: opts.HeadChunksWriteQueueSize,
|
||||||
|
SamplesPerChunk: opts.SamplesPerChunk,
|
||||||
StripeSize: opts.StripeSize,
|
StripeSize: opts.StripeSize,
|
||||||
MinBlockDuration: int64(time.Duration(opts.MinBlockDuration) / time.Millisecond),
|
MinBlockDuration: int64(time.Duration(opts.MinBlockDuration) / time.Millisecond),
|
||||||
MaxBlockDuration: int64(time.Duration(opts.MaxBlockDuration) / time.Millisecond),
|
MaxBlockDuration: int64(time.Duration(opts.MaxBlockDuration) / time.Millisecond),
|
||||||
EnableExemplarStorage: opts.EnableExemplarStorage,
|
EnableExemplarStorage: opts.EnableExemplarStorage,
|
||||||
MaxExemplars: opts.MaxExemplars,
|
MaxExemplars: opts.MaxExemplars,
|
||||||
EnableMemorySnapshotOnShutdown: opts.EnableMemorySnapshotOnShutdown,
|
EnableMemorySnapshotOnShutdown: opts.EnableMemorySnapshotOnShutdown,
|
||||||
|
EnableNativeHistograms: opts.EnableNativeHistograms,
|
||||||
OutOfOrderTimeWindow: opts.OutOfOrderTimeWindow,
|
OutOfOrderTimeWindow: opts.OutOfOrderTimeWindow,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1568,6 +1593,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
|
||||||
type agentOptions struct {
|
type agentOptions struct {
|
||||||
WALSegmentSize units.Base2Bytes
|
WALSegmentSize units.Base2Bytes
|
||||||
WALCompression bool
|
WALCompression bool
|
||||||
|
WALCompressionType string
|
||||||
StripeSize int
|
StripeSize int
|
||||||
TruncateFrequency model.Duration
|
TruncateFrequency model.Duration
|
||||||
MinWALTime, MaxWALTime model.Duration
|
MinWALTime, MaxWALTime model.Duration
|
||||||
|
@ -1577,7 +1603,7 @@ type agentOptions struct {
|
||||||
func (opts agentOptions) ToAgentOptions() agent.Options {
|
func (opts agentOptions) ToAgentOptions() agent.Options {
|
||||||
return agent.Options{
|
return agent.Options{
|
||||||
WALSegmentSize: int(opts.WALSegmentSize),
|
WALSegmentSize: int(opts.WALSegmentSize),
|
||||||
WALCompression: opts.WALCompression,
|
WALCompression: wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType),
|
||||||
StripeSize: opts.StripeSize,
|
StripeSize: opts.StripeSize,
|
||||||
TruncateFrequency: time.Duration(opts.TruncateFrequency),
|
TruncateFrequency: time.Duration(opts.TruncateFrequency),
|
||||||
MinWALTime: durationToInt64Millis(time.Duration(opts.MinWALTime)),
|
MinWALTime: durationToInt64Millis(time.Duration(opts.MinWALTime)),
|
||||||
|
|
|
@@ -23,6 +23,7 @@ import (
 	"os"
 	"os/exec"
 	"path/filepath"
+	"runtime"
 	"strings"
 	"syscall"
 	"testing"
@@ -120,7 +121,7 @@ func TestFailedStartupExitCode(t *testing.T) {
 	fakeInputFile := "fake-input-file"
 	expectedExitStatus := 2
 
-	prom := exec.Command(promPath, "-test.main", "--config.file="+fakeInputFile)
+	prom := exec.Command(promPath, "-test.main", "--web.listen-address=0.0.0.0:0", "--config.file="+fakeInputFile)
 	err := prom.Run()
 	require.Error(t, err)
 
@@ -198,7 +199,7 @@ func TestSendAlerts(t *testing.T) {
 			}
 			require.Equal(t, tc.exp, alerts)
 		})
-		sendAlerts(senderFunc, "http://localhost:9090")(context.TODO(), "up", tc.in...)
+		rules.SendAlerts(senderFunc, "http://localhost:9090")(context.TODO(), "up", tc.in...)
 		})
 	}
 }
@@ -357,7 +358,7 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames
 }
 
 func TestAgentSuccessfulStartup(t *testing.T) {
-	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+agentConfig)
+	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig)
 	require.NoError(t, prom.Start())
 
 	actualExitStatus := 0
@@ -375,7 +376,7 @@ func TestAgentSuccessfulStartup(t *testing.T) {
 }
 
 func TestAgentFailedStartupWithServerFlag(t *testing.T) {
-	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--config.file="+promConfig)
+	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
 
 	output := bytes.Buffer{}
 	prom.Stderr = &output
@@ -402,7 +403,7 @@ func TestAgentFailedStartupWithServerFlag(t *testing.T) {
 }
 
 func TestAgentFailedStartupWithInvalidConfig(t *testing.T) {
-	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+promConfig)
+	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
 	require.NoError(t, prom.Start())
 
 	actualExitStatus := 0
@@ -437,7 +438,7 @@ func TestModeSpecificFlags(t *testing.T) {
 
 	for _, tc := range testcases {
 		t.Run(fmt.Sprintf("%s mode with option %s", tc.mode, tc.arg), func(t *testing.T) {
-			args := []string{"-test.main", tc.arg, t.TempDir()}
+			args := []string{"-test.main", tc.arg, t.TempDir(), "--web.listen-address=0.0.0.0:0"}
 
 			if tc.mode == "agent" {
 				args = append(args, "--enable-feature=agent", "--config.file="+agentConfig)
@@ -483,3 +484,31 @@ func TestModeSpecificFlags(t *testing.T) {
 		})
 	}
 }
+
+func TestDocumentation(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.SkipNow()
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	cmd := exec.CommandContext(ctx, promPath, "-test.main", "--write-documentation")
+
+	var stdout bytes.Buffer
+	cmd.Stdout = &stdout
+
+	if err := cmd.Run(); err != nil {
+		if exitError, ok := err.(*exec.ExitError); ok {
+			if exitError.ExitCode() != 0 {
+				fmt.Println("Command failed with non-zero exit code")
+			}
+		}
+	}
+
+	generatedContent := strings.ReplaceAll(stdout.String(), filepath.Base(promPath), strings.TrimSuffix(filepath.Base(promPath), ".test"))
+
+	expectedContent, err := os.ReadFile(filepath.Join("..", "..", "docs", "command-line", "prometheus.md"))
+	require.NoError(t, err)
+
+	require.Equal(t, string(expectedContent), generatedContent, "Generated content does not match documentation. Hint: run `make cli-documentation`.")
+}
@@ -72,9 +72,11 @@ Loop:
 	if !startedOk {
 		t.Fatal("prometheus didn't start in the specified timeout")
 	}
-	if err := prom.Process.Kill(); err == nil {
+	switch err := prom.Process.Kill(); {
+	case err == nil:
 		t.Errorf("prometheus didn't shutdown gracefully after sending the Interrupt signal")
-	} else if stoppedErr != nil && stoppedErr.Error() != "signal: interrupt" { // TODO - find a better way to detect when the process didn't exit as expected!
+	case stoppedErr != nil && stoppedErr.Error() != "signal: interrupt":
+		// TODO: find a better way to detect when the process didn't exit as expected!
 		t.Errorf("prometheus exited with an unexpected error: %v", stoppedErr)
 	}
 }
@@ -193,7 +193,7 @@ func (p *queryLogTest) String() string {
 	}
 	name = name + ", " + p.host + ":" + strconv.Itoa(p.port)
 	if p.enabledAtStart {
-		name = name + ", enabled at start"
+		name += ", enabled at start"
 	}
 	if p.prefix != "" {
 		name = name + ", with prefix " + p.prefix
@@ -101,7 +101,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
 		nextSampleTs int64 = math.MaxInt64
 	)
 
-	for t := mint; t <= maxt; t = t + blockDuration {
+	for t := mint; t <= maxt; t += blockDuration {
 		tsUpper := t + blockDuration
 		if nextSampleTs != math.MaxInt64 && nextSampleTs >= tsUpper {
 			// The next sample is not in this timerange, we can avoid parsing
@@ -25,6 +25,7 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
 )
 
 type backfillSample struct {
@@ -43,14 +44,14 @@ func sortSamples(samples []backfillSample) {
 	})
 }
 
-func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample {
+func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample { // nolint:revive
 	ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
 	samples := []backfillSample{}
 	for ss.Next() {
 		series := ss.At()
-		it := series.Iterator()
+		it := series.Iterator(nil)
 		require.NoError(t, it.Err())
-		for it.Next() {
+		for it.Next() == chunkenc.ValFloat {
 			ts, v := it.At()
 			samples = append(samples, backfillSample{Timestamp: ts, Value: v, Labels: series.Labels()})
 		}
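Note: with native histograms, `chunkenc.Iterator.Next()` reports the type of the next sample instead of a plain bool, and `Series.Iterator(nil)` accepts an iterator to reuse. The test above therefore only reads a sample while `Next()` returns `chunkenc.ValFloat`. A small sketch of the same consumption pattern for callers that only care about float samples (the helper name is ours, not part of this diff):

```go
package sketch

import "github.com/prometheus/prometheus/tsdb/chunkenc"

// collectFloats drains all float samples from a series iterator and skips any
// other sample types (for example native histogram samples), mirroring the
// loop used in the test above.
func collectFloats(it chunkenc.Iterator) (ts []int64, vs []float64, err error) {
	for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() {
		if typ != chunkenc.ValFloat {
			continue
		}
		t, v := it.At()
		ts = append(ts, t)
		vs = append(vs, v)
	}
	return ts, vs, it.Err()
}
```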
@@ -31,6 +31,7 @@ import (
 	"text/tabwriter"
 	"time"
 
+	"github.com/alecthomas/kingpin/v2"
 	"github.com/go-kit/log"
 	"github.com/google/pprof/profile"
 	"github.com/prometheus/client_golang/api"
@@ -41,10 +42,10 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/version"
 	"github.com/prometheus/exporter-toolkit/web"
-	"gopkg.in/alecthomas/kingpin.v2"
 	"gopkg.in/yaml.v2"
 
 	dto "github.com/prometheus/client_model/go"
+	promconfig "github.com/prometheus/common/config"
 	"github.com/prometheus/common/expfmt"
 
 	"github.com/prometheus/prometheus/config"
@@ -58,6 +59,7 @@ import (
 	_ "github.com/prometheus/prometheus/plugins" // Register plugins.
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/scrape"
+	"github.com/prometheus/prometheus/util/documentcli"
 )
 
 const (
@@ -69,11 +71,20 @@ const (
 	lintOptionAll            = "all"
 	lintOptionDuplicateRules = "duplicate-rules"
 	lintOptionNone           = "none"
+	checkHealth              = "/-/healthy"
+	checkReadiness           = "/-/ready"
 )
 
 var lintOptions = []string{lintOptionAll, lintOptionDuplicateRules, lintOptionNone}
 
 func main() {
+	var (
+		httpRoundTripper   = api.DefaultRoundTripper
+		serverURL          *url.URL
+		remoteWriteURL     *url.URL
+		httpConfigFilePath string
+	)
+
 	app := kingpin.New(filepath.Base(os.Args[0]), "Tooling for the Prometheus monitoring system.").UsageWriter(os.Stdout)
 	app.Version(version.Print("promtool"))
 	app.HelpFlag.Short('h')
@@ -105,11 +116,19 @@ func main() {
 		"The config files to check.",
 	).Required().ExistingFiles()
 
+	checkServerHealthCmd := checkCmd.Command("healthy", "Check if the Prometheus server is healthy.")
+	checkServerHealthCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)
+	checkServerHealthCmd.Flag("url", "The URL for the Prometheus server.").Default("http://localhost:9090").URLVar(&serverURL)
+
+	checkServerReadyCmd := checkCmd.Command("ready", "Check if the Prometheus server is ready.")
+	checkServerReadyCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)
+	checkServerReadyCmd.Flag("url", "The URL for the Prometheus server.").Default("http://localhost:9090").URLVar(&serverURL)
+
 	checkRulesCmd := checkCmd.Command("rules", "Check if the rule files are valid or not.")
 	ruleFiles := checkRulesCmd.Arg(
 		"rule-files",
-		"The rule files to check.",
-	).Required().ExistingFiles()
+		"The rule files to check, default is read from standard input.",
+	).ExistingFiles()
 	checkRulesLint := checkRulesCmd.Flag(
 		"lint",
 		"Linting checks to apply. Available options are: "+strings.Join(lintOptions, ", ")+". Use --lint=none to disable linting",
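Note: `promtool check healthy` and `promtool check ready` hit the server's `/-/healthy` and `/-/ready` endpoints, reusing whatever round tripper was built from `--http.config.file`. A rough sketch of such a check using only the standard library — the function name and exact behaviour here are assumptions for illustration, not the real `CheckServerStatus` implementation:

```go
package sketch

import (
	"fmt"
	"net/http"
	"net/url"
)

// checkEndpoint issues a GET against a status path such as "/-/healthy" or
// "/-/ready" using the provided round tripper and treats any non-2xx answer
// as a failure.
func checkEndpoint(serverURL *url.URL, path string, rt http.RoundTripper) error {
	u := serverURL.JoinPath(path)
	client := &http.Client{Transport: rt}

	resp, err := client.Get(u.String())
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return fmt.Errorf("server returned %s for %s", resp.Status, path)
	}
	return nil
}
```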
@ -124,14 +143,15 @@ func main() {
|
||||||
|
|
||||||
queryCmd := app.Command("query", "Run query against a Prometheus server.")
|
queryCmd := app.Command("query", "Run query against a Prometheus server.")
|
||||||
queryCmdFmt := queryCmd.Flag("format", "Output format of the query.").Short('o').Default("promql").Enum("promql", "json")
|
queryCmdFmt := queryCmd.Flag("format", "Output format of the query.").Short('o').Default("promql").Enum("promql", "json")
|
||||||
|
queryCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)
|
||||||
|
|
||||||
queryInstantCmd := queryCmd.Command("instant", "Run instant query.")
|
queryInstantCmd := queryCmd.Command("instant", "Run instant query.")
|
||||||
queryInstantServer := queryInstantCmd.Arg("server", "Prometheus server to query.").Required().URL()
|
queryInstantCmd.Arg("server", "Prometheus server to query.").Required().URLVar(&serverURL)
|
||||||
queryInstantExpr := queryInstantCmd.Arg("expr", "PromQL query expression.").Required().String()
|
queryInstantExpr := queryInstantCmd.Arg("expr", "PromQL query expression.").Required().String()
|
||||||
queryInstantTime := queryInstantCmd.Flag("time", "Query evaluation time (RFC3339 or Unix timestamp).").String()
|
queryInstantTime := queryInstantCmd.Flag("time", "Query evaluation time (RFC3339 or Unix timestamp).").String()
|
||||||
|
|
||||||
queryRangeCmd := queryCmd.Command("range", "Run range query.")
|
queryRangeCmd := queryCmd.Command("range", "Run range query.")
|
||||||
queryRangeServer := queryRangeCmd.Arg("server", "Prometheus server to query.").Required().URL()
|
queryRangeCmd.Arg("server", "Prometheus server to query.").Required().URLVar(&serverURL)
|
||||||
queryRangeExpr := queryRangeCmd.Arg("expr", "PromQL query expression.").Required().String()
|
queryRangeExpr := queryRangeCmd.Arg("expr", "PromQL query expression.").Required().String()
|
||||||
queryRangeHeaders := queryRangeCmd.Flag("header", "Extra headers to send to server.").StringMap()
|
queryRangeHeaders := queryRangeCmd.Flag("header", "Extra headers to send to server.").StringMap()
|
||||||
queryRangeBegin := queryRangeCmd.Flag("start", "Query range start time (RFC3339 or Unix timestamp).").String()
|
queryRangeBegin := queryRangeCmd.Flag("start", "Query range start time (RFC3339 or Unix timestamp).").String()
|
||||||
|
@ -139,7 +159,7 @@ func main() {
|
||||||
queryRangeStep := queryRangeCmd.Flag("step", "Query step size (duration).").Duration()
|
queryRangeStep := queryRangeCmd.Flag("step", "Query step size (duration).").Duration()
|
||||||
|
|
||||||
querySeriesCmd := queryCmd.Command("series", "Run series query.")
|
querySeriesCmd := queryCmd.Command("series", "Run series query.")
|
||||||
querySeriesServer := querySeriesCmd.Arg("server", "Prometheus server to query.").Required().URL()
|
querySeriesCmd.Arg("server", "Prometheus server to query.").Required().URLVar(&serverURL)
|
||||||
querySeriesMatch := querySeriesCmd.Flag("match", "Series selector. Can be specified multiple times.").Required().Strings()
|
querySeriesMatch := querySeriesCmd.Flag("match", "Series selector. Can be specified multiple times.").Required().Strings()
|
||||||
querySeriesBegin := querySeriesCmd.Flag("start", "Start time (RFC3339 or Unix timestamp).").String()
|
querySeriesBegin := querySeriesCmd.Flag("start", "Start time (RFC3339 or Unix timestamp).").String()
|
||||||
querySeriesEnd := querySeriesCmd.Flag("end", "End time (RFC3339 or Unix timestamp).").String()
|
querySeriesEnd := querySeriesCmd.Flag("end", "End time (RFC3339 or Unix timestamp).").String()
|
||||||
|
@ -153,12 +173,24 @@ func main() {
|
||||||
debugAllServer := debugAllCmd.Arg("server", "Prometheus server to get all debug information from.").Required().String()
|
debugAllServer := debugAllCmd.Arg("server", "Prometheus server to get all debug information from.").Required().String()
|
||||||
|
|
||||||
queryLabelsCmd := queryCmd.Command("labels", "Run labels query.")
|
queryLabelsCmd := queryCmd.Command("labels", "Run labels query.")
|
||||||
queryLabelsServer := queryLabelsCmd.Arg("server", "Prometheus server to query.").Required().URL()
|
queryLabelsCmd.Arg("server", "Prometheus server to query.").Required().URLVar(&serverURL)
|
||||||
queryLabelsName := queryLabelsCmd.Arg("name", "Label name to provide label values for.").Required().String()
|
queryLabelsName := queryLabelsCmd.Arg("name", "Label name to provide label values for.").Required().String()
|
||||||
queryLabelsBegin := queryLabelsCmd.Flag("start", "Start time (RFC3339 or Unix timestamp).").String()
|
queryLabelsBegin := queryLabelsCmd.Flag("start", "Start time (RFC3339 or Unix timestamp).").String()
|
||||||
queryLabelsEnd := queryLabelsCmd.Flag("end", "End time (RFC3339 or Unix timestamp).").String()
|
queryLabelsEnd := queryLabelsCmd.Flag("end", "End time (RFC3339 or Unix timestamp).").String()
|
||||||
queryLabelsMatch := queryLabelsCmd.Flag("match", "Series selector. Can be specified multiple times.").Strings()
|
queryLabelsMatch := queryLabelsCmd.Flag("match", "Series selector. Can be specified multiple times.").Strings()
|
||||||
|
|
||||||
|
pushCmd := app.Command("push", "Push to a Prometheus server.")
|
||||||
|
pushCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)
|
||||||
|
pushMetricsCmd := pushCmd.Command("metrics", "Push metrics to a prometheus remote write (for testing purpose only).")
|
||||||
|
pushMetricsCmd.Arg("remote-write-url", "Prometheus remote write url to push metrics.").Required().URLVar(&remoteWriteURL)
|
||||||
|
metricFiles := pushMetricsCmd.Arg(
|
||||||
|
"metric-files",
|
||||||
|
"The metric files to push, default is read from standard input.",
|
||||||
|
).ExistingFiles()
|
||||||
|
pushMetricsLabels := pushMetricsCmd.Flag("label", "Label to attach to metrics. Can be specified multiple times.").Default("job=promtool").StringMap()
|
||||||
|
pushMetricsTimeout := pushMetricsCmd.Flag("timeout", "The time to wait for pushing metrics.").Default("30s").Duration()
|
||||||
|
pushMetricsHeaders := pushMetricsCmd.Flag("header", "Prometheus remote write header.").StringMap()
|
||||||
|
|
||||||
testCmd := app.Command("test", "Unit testing.")
|
testCmd := app.Command("test", "Unit testing.")
|
||||||
testRulesCmd := testCmd.Command("rules", "Unit tests for rules.")
|
testRulesCmd := testCmd.Command("rules", "Unit tests for rules.")
|
||||||
testRulesFiles := testRulesCmd.Arg(
|
testRulesFiles := testRulesCmd.Arg(
|
||||||
|
@ -190,6 +222,7 @@ func main() {
|
||||||
dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
|
dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
|
||||||
dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
|
dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
|
||||||
dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
|
dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
|
||||||
|
dumpMatch := tsdbDumpCmd.Flag("match", "Series selector.").Default("{__name__=~'(?s:.*)'}").String()
|
||||||
|
|
||||||
importCmd := tsdbCmd.Command("create-blocks-from", "[Experimental] Import samples from input and produce TSDB blocks. Please refer to the storage docs for more details.")
|
importCmd := tsdbCmd.Command("create-blocks-from", "[Experimental] Import samples from input and produce TSDB blocks. Please refer to the storage docs for more details.")
|
||||||
importHumanReadable := importCmd.Flag("human-readable", "Print human readable values.").Short('r').Bool()
|
importHumanReadable := importCmd.Flag("human-readable", "Print human readable values.").Short('r').Bool()
|
||||||
|
@ -199,7 +232,8 @@ func main() {
|
||||||
importFilePath := openMetricsImportCmd.Arg("input file", "OpenMetrics file to read samples from.").Required().String()
|
importFilePath := openMetricsImportCmd.Arg("input file", "OpenMetrics file to read samples from.").Required().String()
|
||||||
importDBPath := openMetricsImportCmd.Arg("output directory", "Output directory for generated blocks.").Default(defaultDBPath).String()
|
importDBPath := openMetricsImportCmd.Arg("output directory", "Output directory for generated blocks.").Default(defaultDBPath).String()
|
||||||
importRulesCmd := importCmd.Command("rules", "Create blocks of data for new recording rules.")
|
importRulesCmd := importCmd.Command("rules", "Create blocks of data for new recording rules.")
|
||||||
importRulesURL := importRulesCmd.Flag("url", "The URL for the Prometheus API with the data where the rule will be backfilled from.").Default("http://localhost:9090").URL()
|
importRulesCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)
|
||||||
|
importRulesCmd.Flag("url", "The URL for the Prometheus API with the data where the rule will be backfilled from.").Default("http://localhost:9090").URLVar(&serverURL)
|
||||||
importRulesStart := importRulesCmd.Flag("start", "The time to start backfilling the new rule from. Must be a RFC3339 formatted date or Unix timestamp. Required.").
|
importRulesStart := importRulesCmd.Flag("start", "The time to start backfilling the new rule from. Must be a RFC3339 formatted date or Unix timestamp. Required.").
|
||||||
Required().String()
|
Required().String()
|
||||||
importRulesEnd := importRulesCmd.Flag("end", "If an end time is provided, all recording rules in the rule files provided will be backfilled to the end time. Default will backfill up to 3 hours ago. Must be a RFC3339 formatted date or Unix timestamp.").String()
|
importRulesEnd := importRulesCmd.Flag("end", "If an end time is provided, all recording rules in the rule files provided will be backfilled to the end time. Default will backfill up to 3 hours ago. Must be a RFC3339 formatted date or Unix timestamp.").String()
|
||||||
|
@ -213,6 +247,8 @@ func main() {
|
||||||
|
|
||||||
featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings()
|
featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings()
|
||||||
|
|
||||||
|
documentationCmd := app.Command("write-documentation", "Generate command line documentation. Internal use.").Hidden()
|
||||||
|
|
||||||
parsedCmd := kingpin.MustParse(app.Parse(os.Args[1:]))
|
parsedCmd := kingpin.MustParse(app.Parse(os.Args[1:]))
|
||||||
|
|
||||||
var p printer
|
var p printer
|
||||||
|
@ -223,6 +259,22 @@ func main() {
|
||||||
p = &promqlPrinter{}
|
p = &promqlPrinter{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if httpConfigFilePath != "" {
|
||||||
|
if serverURL != nil && serverURL.User.Username() != "" {
|
||||||
|
kingpin.Fatalf("Cannot set base auth in the server URL and use a http.config.file at the same time")
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
httpConfig, _, err := config_util.LoadHTTPConfigFile(httpConfigFilePath)
|
||||||
|
if err != nil {
|
||||||
|
kingpin.Fatalf("Failed to load HTTP config file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
httpRoundTripper, err = promconfig.NewRoundTripperFromConfig(*httpConfig, "promtool", config_util.WithUserAgent("promtool/"+version.Version))
|
||||||
|
if err != nil {
|
||||||
|
kingpin.Fatalf("Failed to create a new HTTP round tripper: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
var noDefaultScrapePort bool
|
var noDefaultScrapePort bool
|
||||||
for _, f := range *featureList {
|
for _, f := range *featureList {
|
||||||
opts := strings.Split(f, ",")
|
opts := strings.Split(f, ",")
|
||||||
|
@ -247,6 +299,12 @@ func main() {
|
||||||
case checkConfigCmd.FullCommand():
|
case checkConfigCmd.FullCommand():
|
||||||
os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...))
|
os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...))
|
||||||
|
|
||||||
|
case checkServerHealthCmd.FullCommand():
|
||||||
|
os.Exit(checkErr(CheckServerStatus(serverURL, checkHealth, httpRoundTripper)))
|
||||||
|
|
||||||
|
case checkServerReadyCmd.FullCommand():
|
||||||
|
os.Exit(checkErr(CheckServerStatus(serverURL, checkReadiness, httpRoundTripper)))
|
||||||
|
|
||||||
case checkWebConfigCmd.FullCommand():
|
case checkWebConfigCmd.FullCommand():
|
||||||
os.Exit(CheckWebConfig(*webConfigFiles...))
|
os.Exit(CheckWebConfig(*webConfigFiles...))
|
||||||
|
|
||||||
|
@ -256,14 +314,17 @@ func main() {
|
||||||
case checkMetricsCmd.FullCommand():
|
case checkMetricsCmd.FullCommand():
|
||||||
os.Exit(CheckMetrics(*checkMetricsExtended))
|
os.Exit(CheckMetrics(*checkMetricsExtended))
|
||||||
|
|
||||||
|
case pushMetricsCmd.FullCommand():
|
||||||
|
os.Exit(PushMetrics(remoteWriteURL, httpRoundTripper, *pushMetricsHeaders, *pushMetricsTimeout, *pushMetricsLabels, *metricFiles...))
|
||||||
|
|
||||||
case queryInstantCmd.FullCommand():
|
case queryInstantCmd.FullCommand():
|
||||||
os.Exit(QueryInstant(*queryInstantServer, *queryInstantExpr, *queryInstantTime, p))
|
os.Exit(QueryInstant(serverURL, httpRoundTripper, *queryInstantExpr, *queryInstantTime, p))
|
||||||
|
|
||||||
case queryRangeCmd.FullCommand():
|
case queryRangeCmd.FullCommand():
|
||||||
os.Exit(QueryRange(*queryRangeServer, *queryRangeHeaders, *queryRangeExpr, *queryRangeBegin, *queryRangeEnd, *queryRangeStep, p))
|
os.Exit(QueryRange(serverURL, httpRoundTripper, *queryRangeHeaders, *queryRangeExpr, *queryRangeBegin, *queryRangeEnd, *queryRangeStep, p))
|
||||||
|
|
||||||
case querySeriesCmd.FullCommand():
|
case querySeriesCmd.FullCommand():
|
||||||
os.Exit(QuerySeries(*querySeriesServer, *querySeriesMatch, *querySeriesBegin, *querySeriesEnd, p))
|
os.Exit(QuerySeries(serverURL, httpRoundTripper, *querySeriesMatch, *querySeriesBegin, *querySeriesEnd, p))
|
||||||
|
|
||||||
case debugPprofCmd.FullCommand():
|
case debugPprofCmd.FullCommand():
|
||||||
os.Exit(debugPprof(*debugPprofServer))
|
os.Exit(debugPprof(*debugPprofServer))
|
||||||
|
@ -275,7 +336,7 @@ func main() {
|
||||||
os.Exit(debugAll(*debugAllServer))
|
os.Exit(debugAll(*debugAllServer))
|
||||||
|
|
||||||
case queryLabelsCmd.FullCommand():
|
case queryLabelsCmd.FullCommand():
|
||||||
os.Exit(QueryLabels(*queryLabelsServer, *queryLabelsMatch, *queryLabelsName, *queryLabelsBegin, *queryLabelsEnd, p))
|
os.Exit(QueryLabels(serverURL, httpRoundTripper, *queryLabelsMatch, *queryLabelsName, *queryLabelsBegin, *queryLabelsEnd, p))
|
||||||
|
|
||||||
case testRulesCmd.FullCommand():
|
case testRulesCmd.FullCommand():
|
||||||
os.Exit(RulesUnitTest(
|
os.Exit(RulesUnitTest(
|
||||||
|
@ -296,13 +357,15 @@ func main() {
|
||||||
os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable)))
|
os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable)))
|
||||||
|
|
||||||
case tsdbDumpCmd.FullCommand():
|
case tsdbDumpCmd.FullCommand():
|
||||||
os.Exit(checkErr(dumpSamples(*dumpPath, *dumpMinTime, *dumpMaxTime)))
|
os.Exit(checkErr(dumpSamples(*dumpPath, *dumpMinTime, *dumpMaxTime, *dumpMatch)))
|
||||||
// TODO(aSquare14): Work on adding support for custom block size.
|
// TODO(aSquare14): Work on adding support for custom block size.
|
||||||
case openMetricsImportCmd.FullCommand():
|
case openMetricsImportCmd.FullCommand():
|
||||||
os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration))
|
os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration))
|
||||||
|
|
||||||
case importRulesCmd.FullCommand():
|
case importRulesCmd.FullCommand():
|
||||||
os.Exit(checkErr(importRules(*importRulesURL, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...)))
|
os.Exit(checkErr(importRules(serverURL, httpRoundTripper, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...)))
|
||||||
|
case documentationCmd.FullCommand():
|
||||||
|
os.Exit(checkErr(documentcli.GenerateMarkdown(app.Model(), os.Stdout)))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
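Note (illustrative, not part of this change): the pattern above threads one shared server URL and one round tripper built from --http.config.file into every subcommand. A minimal Go sketch of that wiring, assuming only the calls visible in the hunk; the file path and helper name are placeholders.

// newAPIClient is a hypothetical helper showing how an HTTP config file
// can back a Prometheus API client, as the new promtool flags do.
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/api"
	promconfig "github.com/prometheus/common/config"
)

func newAPIClient(address, httpConfigFile string) (api.Client, error) {
	httpConfig, _, err := promconfig.LoadHTTPConfigFile(httpConfigFile)
	if err != nil {
		return nil, fmt.Errorf("load HTTP config file: %w", err)
	}
	rt, err := promconfig.NewRoundTripperFromConfig(*httpConfig, "promtool")
	if err != nil {
		return nil, fmt.Errorf("new round tripper: %w", err)
	}
	// TLS, basic auth, bearer tokens, etc. from the file now apply to every request.
	return api.NewClient(api.Config{Address: address, RoundTripper: rt})
}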
@@ -338,6 +401,43 @@ func (ls lintConfig) lintDuplicateRules() bool {
 	return ls.all || ls.duplicateRules
 }

+// Check server status - healthy & ready.
+func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper http.RoundTripper) error {
+	if serverURL.Scheme == "" {
+		serverURL.Scheme = "http"
+	}
+
+	config := api.Config{
+		Address:      serverURL.String() + checkEndpoint,
+		RoundTripper: roundTripper,
+	}
+
+	// Create new client.
+	c, err := api.NewClient(config)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "error creating API client:", err)
+		return err
+	}
+
+	request, err := http.NewRequest("GET", config.Address, nil)
+	if err != nil {
+		return err
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+	response, dataBytes, err := c.Do(ctx, request)
+	if err != nil {
+		return err
+	}
+
+	if response.StatusCode != http.StatusOK {
+		return fmt.Errorf("check failed: URL=%s, status=%d", serverURL, response.StatusCode)
+	}
+
+	fmt.Fprintln(os.Stderr, "  SUCCESS: ", string(dataBytes))
+	return nil
+}
+
 // CheckConfig validates configuration files.
 func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files ...string) int {
 	failed := false
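Note (illustrative, not part of this change): CheckServerStatus issues a plain GET against serverURL plus the given endpoint and treats anything other than 200 as a failure. A minimal sketch of calling it for both checks; the endpoint paths are assumptions here, since the checkHealth and checkReadiness constants are defined outside this hunk.

package main

import (
	"net/http"
	"net/url"
)

// checkBoth is a hypothetical helper mirroring the new
// `promtool check healthy` / `promtool check ready` cases.
func checkBoth(server *url.URL) error {
	for _, endpoint := range []string{"/-/healthy", "/-/ready"} { // assumed paths
		if err := CheckServerStatus(server, endpoint, http.DefaultTransport); err != nil {
			return err
		}
	}
	return nil
}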
@@ -357,20 +457,12 @@ func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files
 		}
 		fmt.Println()

-		for _, rf := range ruleFiles {
-			if n, errs := checkRules(rf, lintSettings); len(errs) > 0 {
-				fmt.Fprintln(os.Stderr, "  FAILED:")
-				for _, err := range errs {
-					fmt.Fprintln(os.Stderr, "    ", err)
-				}
-				failed = true
-				for _, err := range errs {
-					hasErrors = hasErrors || !errors.Is(err, lintError)
-				}
-			} else {
-				fmt.Printf("  SUCCESS: %d rules found\n", n)
-			}
-			fmt.Println()
+		rulesFailed, rulesHasErrors := checkRules(ruleFiles, lintSettings)
+		if rulesFailed {
+			failed = rulesFailed
+		}
+		if rulesHasErrors {
+			hasErrors = rulesHasErrors
 		}
 	}
 	if failed && hasErrors {
@@ -437,7 +529,18 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin
 		}
 	}

-	for _, scfg := range cfg.ScrapeConfigs {
+	var scfgs []*config.ScrapeConfig
+	if checkSyntaxOnly {
+		scfgs = cfg.ScrapeConfigs
+	} else {
+		var err error
+		scfgs, err = cfg.GetScrapeConfigs()
+		if err != nil {
+			return nil, fmt.Errorf("error loading scrape configs: %w", err)
+		}
+	}
+
+	for _, scfg := range scfgs {
 		if !checkSyntaxOnly && scfg.HTTPClientConfig.Authorization != nil {
 			if err := checkFileExists(scfg.HTTPClientConfig.Authorization.CredentialsFile); err != nil {
 				return nil, fmt.Errorf("error checking authorization credentials or bearer token file %q: %w", scfg.HTTPClientConfig.Authorization.CredentialsFile, err)
@@ -587,9 +690,57 @@ func checkSDFile(filename string) ([]*targetgroup.Group, error) {
 func CheckRules(ls lintConfig, files ...string) int {
 	failed := false
 	hasErrors := false
+	if len(files) == 0 {
+		fmt.Println("Checking standard input")
+		data, err := io.ReadAll(os.Stdin)
+		if err != nil {
+			fmt.Fprintln(os.Stderr, "  FAILED:", err)
+			return failureExitCode
+		}
+		rgs, errs := rulefmt.Parse(data)
+		for _, e := range errs {
+			fmt.Fprintln(os.Stderr, e.Error())
+			return failureExitCode
+		}
+		if n, errs := checkRuleGroups(rgs, ls); errs != nil {
+			fmt.Fprintln(os.Stderr, "  FAILED:")
+			for _, e := range errs {
+				fmt.Fprintln(os.Stderr, e.Error())
+			}
+			failed = true
+			for _, err := range errs {
+				hasErrors = hasErrors || !errors.Is(err, lintError)
+			}
+		} else {
+			fmt.Printf("  SUCCESS: %d rules found\n", n)
+		}
+		fmt.Println()
+	} else {
+		failed, hasErrors = checkRules(files, ls)
+	}
+
+	if failed && hasErrors {
+		return failureExitCode
+	}
+	if failed && ls.fatal {
+		return lintErrExitCode
+	}
+
+	return successExitCode
+}
+
+// checkRules validates rule files.
+func checkRules(files []string, ls lintConfig) (bool, bool) {
+	failed := false
+	hasErrors := false
 	for _, f := range files {
-		if n, errs := checkRules(f, ls); errs != nil {
+		fmt.Println("Checking", f)
+		rgs, errs := rulefmt.ParseFile(f)
+		if errs != nil {
+			failed = true
+			continue
+		}
+		if n, errs := checkRuleGroups(rgs, ls); errs != nil {
 			fmt.Fprintln(os.Stderr, "  FAILED:")
 			for _, e := range errs {
 				fmt.Fprintln(os.Stderr, e.Error())
@@ -603,23 +754,10 @@ func CheckRules(ls lintConfig, files ...string) int {
 			}
 		}
 		fmt.Println()
 	}
-	if failed && hasErrors {
-		return failureExitCode
-	}
-	if failed && ls.fatal {
-		return lintErrExitCode
-	}
-	return successExitCode
+	return failed, hasErrors
 }

-func checkRules(filename string, lintSettings lintConfig) (int, []error) {
-	fmt.Println("Checking", filename)
-
-	rgs, errs := rulefmt.ParseFile(filename)
-	if errs != nil {
-		return successExitCode, errs
-	}
-
+func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings lintConfig) (int, []error) {
 	numRules := 0
 	for _, rg := range rgs.Groups {
 		numRules += len(rg.Rules)
@@ -631,9 +769,9 @@ func checkRules(filename string, lintSettings lintConfig) (int, []error) {
 		errMessage := fmt.Sprintf("%d duplicate rule(s) found.\n", len(dRules))
 		for _, n := range dRules {
 			errMessage += fmt.Sprintf("Metric: %s\nLabel(s):\n", n.metric)
-			for _, l := range n.label {
+			n.label.Range(func(l labels.Label) {
 				errMessage += fmt.Sprintf("\t%s: %s\n", l.Name, l.Value)
-			}
+			})
 		}
 		errMessage += "Might cause inconsistency while recording expressions"
 		return 0, []error{fmt.Errorf("%w %s", lintError, errMessage)}
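Note (illustrative, not part of this change): splitting per-file parsing (checkRules) from group validation (checkRuleGroups) is what lets `promtool check rules` also validate rule text piped on standard input. A minimal sketch of that stdin path under the same API; the rule group literal is an example, not taken from the change.

package main

import (
	"fmt"
	"os"

	"github.com/prometheus/prometheus/model/rulefmt"
)

func main() {
	data := []byte("groups:\n- name: example\n  rules:\n  - record: job:up:sum\n    expr: sum by (job) (up)\n")
	rgs, errs := rulefmt.Parse(data) // same call CheckRules uses for stdin
	for _, err := range errs {
		fmt.Fprintln(os.Stderr, err)
	}
	if len(errs) == 0 {
		for _, g := range rgs.Groups {
			fmt.Printf("group %q: %d rules\n", g.Name, len(g.Rules))
		}
	}
}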
@@ -794,12 +932,13 @@ func checkMetricsExtended(r io.Reader) ([]metricStat, int, error) {
 }

 // QueryInstant performs an instant query against a Prometheus server.
-func QueryInstant(url *url.URL, query, evalTime string, p printer) int {
+func QueryInstant(url *url.URL, roundTripper http.RoundTripper, query, evalTime string, p printer) int {
 	if url.Scheme == "" {
 		url.Scheme = "http"
 	}
 	config := api.Config{
 		Address: url.String(),
+		RoundTripper: roundTripper,
 	}

 	// Create new client.
@@ -834,12 +973,13 @@ func QueryInstant(url *url.URL, query, evalTime string, p printer) int {
 }

 // QueryRange performs a range query against a Prometheus server.
-func QueryRange(url *url.URL, headers map[string]string, query, start, end string, step time.Duration, p printer) int {
+func QueryRange(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, query, start, end string, step time.Duration, p printer) int {
 	if url.Scheme == "" {
 		url.Scheme = "http"
 	}
 	config := api.Config{
 		Address: url.String(),
+		RoundTripper: roundTripper,
 	}

 	if len(headers) > 0 {
@@ -847,7 +987,7 @@ func QueryRange(url *url.URL, headers map[string]string, query, start, end strin
 			for key, value := range headers {
 				req.Header.Add(key, value)
 			}
-			return http.DefaultTransport.RoundTrip(req)
+			return roundTripper.RoundTrip(req)
 		})
 	}

@@ -907,12 +1047,13 @@ func QueryRange(url *url.URL, headers map[string]string, query, start, end strin
 }

 // QuerySeries queries for a series against a Prometheus server.
-func QuerySeries(url *url.URL, matchers []string, start, end string, p printer) int {
+func QuerySeries(url *url.URL, roundTripper http.RoundTripper, matchers []string, start, end string, p printer) int {
 	if url.Scheme == "" {
 		url.Scheme = "http"
 	}
 	config := api.Config{
 		Address: url.String(),
+		RoundTripper: roundTripper,
 	}

 	// Create new client.
@@ -943,12 +1084,13 @@ func QuerySeries(url *url.URL, matchers []string, start, end string, p printer)
 }

 // QueryLabels queries for label values against a Prometheus server.
-func QueryLabels(url *url.URL, matchers []string, name, start, end string, p printer) int {
+func QueryLabels(url *url.URL, roundTripper http.RoundTripper, matchers []string, name, start, end string, p printer) int {
 	if url.Scheme == "" {
 		url.Scheme = "http"
 	}
 	config := api.Config{
 		Address: url.String(),
+		RoundTripper: roundTripper,
 	}

 	// Create new client.
@@ -1153,7 +1295,7 @@ func (j *jsonPrinter) printLabelValues(v model.LabelValues) {

 // importRules backfills recording rules from the files provided. The output are blocks of data
 // at the outputDir location.
-func importRules(url *url.URL, start, end, outputDir string, evalInterval, maxBlockDuration time.Duration, files ...string) error {
+func importRules(url *url.URL, roundTripper http.RoundTripper, start, end, outputDir string, evalInterval, maxBlockDuration time.Duration, files ...string) error {
 	ctx := context.Background()
 	var stime, etime time.Time
 	var err error
@@ -1183,7 +1325,8 @@ func importRules(url *url.URL, start, end, outputDir string, evalInterval, maxBl
 		maxBlockDuration: maxBlockDuration,
 	}
 	client, err := api.NewClient(api.Config{
 		Address: url.String(),
+		RoundTripper: roundTripper,
 	})
 	if err != nil {
 		return fmt.Errorf("new api client error: %w", err)
@@ -1219,8 +1362,11 @@ func checkTargetGroupsForAlertmanager(targetGroups []*targetgroup.Group, amcfg *
 }

 func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *config.ScrapeConfig) error {
+	var targets []*scrape.Target
+	lb := labels.NewBuilder(labels.EmptyLabels())
 	for _, tg := range targetGroups {
-		_, failures := scrape.TargetsFromGroup(tg, scfg, false)
+		var failures []error
+		targets, failures = scrape.TargetsFromGroup(tg, scfg, false, targets, lb)
 		if len(failures) > 0 {
 			first := failures[0]
 			return first
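Note (illustrative, not part of this change): the extra targets slice and lb builder arguments exist so repeated TargetsFromGroup calls can reuse memory rather than reallocating per group, which is the relabel memory-reuse theme this branch merges with. A minimal sketch of the reuse pattern, using only calls that appear elsewhere in this diff.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	lb := labels.NewBuilder(labels.EmptyLabels())
	for _, name := range []string{"alpha", "beta"} {
		lb.Reset(labels.EmptyLabels()) // reuse the builder, drop previous state
		lb.Set("job", name)
		lb.Set("instance", name+":9090")
		fmt.Println(lb.Labels())
	}
}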
@@ -14,6 +14,8 @@
 package main

 import (
+	"bytes"
+	"context"
 	"errors"
 	"fmt"
 	"net/http"
@@ -21,6 +23,7 @@ import (
 	"net/url"
 	"os"
 	"os/exec"
+	"path/filepath"
 	"runtime"
 	"strings"
 	"syscall"
@@ -56,14 +59,14 @@ func TestQueryRange(t *testing.T) {
 	require.Equal(t, nil, err)

 	p := &promqlPrinter{}
-	exitCode := QueryRange(urlObject, map[string]string{}, "up", "0", "300", 0, p)
+	exitCode := QueryRange(urlObject, http.DefaultTransport, map[string]string{}, "up", "0", "300", 0, p)
 	require.Equal(t, "/api/v1/query_range", getRequest().URL.Path)
 	form := getRequest().Form
 	require.Equal(t, "up", form.Get("query"))
 	require.Equal(t, "1", form.Get("step"))
 	require.Equal(t, 0, exitCode)

-	exitCode = QueryRange(urlObject, map[string]string{}, "up", "0", "300", 10*time.Millisecond, p)
+	exitCode = QueryRange(urlObject, http.DefaultTransport, map[string]string{}, "up", "0", "300", 10*time.Millisecond, p)
 	require.Equal(t, "/api/v1/query_range", getRequest().URL.Path)
 	form = getRequest().Form
 	require.Equal(t, "up", form.Get("query"))
@@ -79,7 +82,7 @@ func TestQueryInstant(t *testing.T) {
 	require.Equal(t, nil, err)

 	p := &promqlPrinter{}
-	exitCode := QueryInstant(urlObject, "up", "300", p)
+	exitCode := QueryInstant(urlObject, http.DefaultTransport, "up", "300", p)
 	require.Equal(t, "/api/v1/query", getRequest().URL.Path)
 	form := getRequest().Form
 	require.Equal(t, "up", form.Get("query"))
@@ -433,3 +436,31 @@ func TestExitCodes(t *testing.T) {
 		})
 	}
 }
+
+func TestDocumentation(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.SkipNow()
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	cmd := exec.CommandContext(ctx, promtoolPath, "-test.main", "write-documentation")
+
+	var stdout bytes.Buffer
+	cmd.Stdout = &stdout
+
+	if err := cmd.Run(); err != nil {
+		if exitError, ok := err.(*exec.ExitError); ok {
+			if exitError.ExitCode() != 0 {
+				fmt.Println("Command failed with non-zero exit code")
+			}
+		}
+	}
+
+	generatedContent := strings.ReplaceAll(stdout.String(), filepath.Base(promtoolPath), strings.TrimSuffix(filepath.Base(promtoolPath), ".test"))
+
+	expectedContent, err := os.ReadFile(filepath.Join("..", "..", "docs", "command-line", "promtool.md"))
+	require.NoError(t, err)
+
+	require.Equal(t, string(expectedContent), generatedContent, "Generated content does not match documentation. Hint: run `make cli-documentation`.")
+}
138
cmd/promtool/metrics.go
Normal file
@@ -0,0 +1,138 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"time"
+
+	"github.com/golang/snappy"
+	config_util "github.com/prometheus/common/config"
+	"github.com/prometheus/common/model"
+
+	"github.com/prometheus/prometheus/storage/remote"
+	"github.com/prometheus/prometheus/util/fmtutil"
+)
+
+// Push metrics to a prometheus remote write (for testing purpose only).
+func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, labels map[string]string, files ...string) int {
+	addressURL, err := url.Parse(url.String())
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		return failureExitCode
+	}
+
+	// build remote write client
+	writeClient, err := remote.NewWriteClient("remote-write", &remote.ClientConfig{
+		URL:     &config_util.URL{URL: addressURL},
+		Timeout: model.Duration(timeout),
+	})
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		return failureExitCode
+	}
+
+	// set custom tls config from httpConfigFilePath
+	// set custom headers to every request
+	client, ok := writeClient.(*remote.Client)
+	if !ok {
+		fmt.Fprintln(os.Stderr, fmt.Errorf("unexpected type %T", writeClient))
+		return failureExitCode
+	}
+	client.Client.Transport = &setHeadersTransport{
+		RoundTripper: roundTripper,
+		headers:      headers,
+	}
+
+	var data []byte
+	var failed bool
+
+	if len(files) == 0 {
+		data, err = io.ReadAll(os.Stdin)
+		if err != nil {
+			fmt.Fprintln(os.Stderr, "  FAILED:", err)
+			return failureExitCode
+		}
+		fmt.Printf("Parsing standard input\n")
+		if parseAndPushMetrics(client, data, labels) {
+			fmt.Printf("  SUCCESS: metrics pushed to remote write.\n")
+			return successExitCode
+		}
+		return failureExitCode
+	}
+
+	for _, file := range files {
+		data, err = os.ReadFile(file)
+		if err != nil {
+			fmt.Fprintln(os.Stderr, "  FAILED:", err)
+			failed = true
+			continue
+		}
+
+		fmt.Printf("Parsing metrics file %s\n", file)
+		if parseAndPushMetrics(client, data, labels) {
+			fmt.Printf("  SUCCESS: metrics file %s pushed to remote write.\n", file)
+			continue
+		}
+		failed = true
+	}
+
+	if failed {
+		return failureExitCode
+	}
+
+	return successExitCode
+}
+
+func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]string) bool {
+	metricsData, err := fmtutil.MetricTextToWriteRequest(bytes.NewReader(data), labels)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "  FAILED:", err)
+		return false
+	}
+
+	raw, err := metricsData.Marshal()
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "  FAILED:", err)
+		return false
+	}
+
+	// Encode the request body into snappy encoding.
+	compressed := snappy.Encode(nil, raw)
+	err = client.Store(context.Background(), compressed)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "  FAILED:", err)
+		return false
+	}
+
+	return true
+}
+
+type setHeadersTransport struct {
+	http.RoundTripper
+	headers map[string]string
+}
+
+func (s *setHeadersTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	for key, value := range s.headers {
+		req.Header.Set(key, value)
+	}
+	return s.RoundTripper.RoundTrip(req)
+}
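Note (illustrative, not part of this change): a minimal sketch of invoking the new PushMetrics entry point the way the `promtool push metrics` command does. The URL, header, label values and file name are placeholders; http.DefaultTransport mirrors what the tests pass for the round tripper.

package main

import (
	"net/http"
	"net/url"
	"os"
	"time"
)

func main() {
	u, _ := url.Parse("http://localhost:9090/api/v1/write") // placeholder remote write endpoint
	code := PushMetrics(u, http.DefaultTransport,
		map[string]string{"X-Scope-OrgID": "demo"}, // extra headers, illustrative
		30*time.Second,
		map[string]string{"job": "manual"}, // labels attached to every series
		"metrics.prom")                     // text-format metrics input file
	os.Exit(code)
}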
@@ -68,7 +68,7 @@ func newRuleImporter(logger log.Logger, config ruleImporterConfig, apiClient que
 }

 // loadGroups parses groups from a list of recording rule files.
-func (importer *ruleImporter) loadGroups(ctx context.Context, filenames []string) (errs []error) {
+func (importer *ruleImporter) loadGroups(_ context.Context, filenames []string) (errs []error) {
 	groups, errs := importer.ruleManager.LoadGroups(importer.config.evalInterval, labels.Labels{}, "", nil, filenames...)
 	if errs != nil {
 		return errs
@@ -100,7 +100,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
 	startInMs := start.Unix() * int64(time.Second/time.Millisecond)
 	endInMs := end.Unix() * int64(time.Second/time.Millisecond)

-	for startOfBlock := blockDuration * (startInMs / blockDuration); startOfBlock <= endInMs; startOfBlock = startOfBlock + blockDuration {
+	for startOfBlock := blockDuration * (startInMs / blockDuration); startOfBlock <= endInMs; startOfBlock += blockDuration {
 		endOfBlock := startOfBlock + blockDuration - 1

 		currStart := max(startOfBlock/int64(time.Second/time.Millisecond), start.Unix())
@@ -158,14 +158,15 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName

 		// Setting the rule labels after the output of the query,
 		// so they can override query output.
-		for _, l := range ruleLabels {
+		ruleLabels.Range(func(l labels.Label) {
 			lb.Set(l.Name, l.Value)
-		}
+		})

 		lb.Set(labels.MetricName, ruleName)
+		lbls := lb.Labels()

 		for _, value := range sample.Values {
-			if err := app.add(ctx, lb.Labels(nil), timestamp.FromTime(value.Timestamp.Time()), float64(value.Value)); err != nil {
+			if err := app.add(ctx, lbls, timestamp.FromTime(value.Timestamp.Time()), float64(value.Value)); err != nil {
 				return fmt.Errorf("add: %w", err)
 			}
 		}
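Note (illustrative, not part of this change): the rewritten loop condition keeps the same arithmetic, which floors the start timestamp to a blockDuration boundary via integer division. A small self-contained example of that alignment; the duration and timestamps are made-up numbers.

package main

import "fmt"

func main() {
	const blockDuration = int64(2 * 60 * 60 * 1000) // e.g. 2h expressed in milliseconds
	startInMs := int64(7_300_000_000)               // arbitrary example start
	endInMs := startInMs + 3*blockDuration

	// blockDuration * (startInMs / blockDuration) rounds down to an aligned block start.
	for startOfBlock := blockDuration * (startInMs / blockDuration); startOfBlock <= endInMs; startOfBlock += blockDuration {
		endOfBlock := startOfBlock + blockDuration - 1
		fmt.Println(startOfBlock, endOfBlock)
	}
}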
@ -28,13 +28,14 @@ import (
|
||||||
|
|
||||||
"github.com/prometheus/prometheus/model/labels"
|
"github.com/prometheus/prometheus/model/labels"
|
||||||
"github.com/prometheus/prometheus/tsdb"
|
"github.com/prometheus/prometheus/tsdb"
|
||||||
|
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
||||||
)
|
)
|
||||||
|
|
||||||
type mockQueryRangeAPI struct {
|
type mockQueryRangeAPI struct {
|
||||||
samples model.Matrix
|
samples model.Matrix
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mockAPI mockQueryRangeAPI) QueryRange(ctx context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) {
|
func (mockAPI mockQueryRangeAPI) QueryRange(_ context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) { // nolint:revive
|
||||||
return mockAPI.samples, v1.Warnings{}, nil
|
return mockAPI.samples, v1.Warnings{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -99,7 +100,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
|
||||||
require.Equal(t, 1, len(gRules))
|
require.Equal(t, 1, len(gRules))
|
||||||
require.Equal(t, "rule1", gRules[0].Name())
|
require.Equal(t, "rule1", gRules[0].Name())
|
||||||
require.Equal(t, "ruleExpr", gRules[0].Query().String())
|
require.Equal(t, "ruleExpr", gRules[0].Query().String())
|
||||||
require.Equal(t, 1, len(gRules[0].Labels()))
|
require.Equal(t, 1, gRules[0].Labels().Len())
|
||||||
|
|
||||||
group2 := ruleImporter.groups[path2+";group2"]
|
group2 := ruleImporter.groups[path2+";group2"]
|
||||||
require.NotNil(t, group2)
|
require.NotNil(t, group2)
|
||||||
|
@ -108,7 +109,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
|
||||||
require.Equal(t, 2, len(g2Rules))
|
require.Equal(t, 2, len(g2Rules))
|
||||||
require.Equal(t, "grp2_rule1", g2Rules[0].Name())
|
require.Equal(t, "grp2_rule1", g2Rules[0].Name())
|
||||||
require.Equal(t, "grp2_rule1_expr", g2Rules[0].Query().String())
|
require.Equal(t, "grp2_rule1_expr", g2Rules[0].Query().String())
|
||||||
require.Equal(t, 0, len(g2Rules[0].Labels()))
|
require.Equal(t, 0, g2Rules[0].Labels().Len())
|
||||||
|
|
||||||
// Backfill all recording rules then check the blocks to confirm the correct data was created.
|
// Backfill all recording rules then check the blocks to confirm the correct data was created.
|
||||||
errs = ruleImporter.importAll(ctx)
|
errs = ruleImporter.importAll(ctx)
|
||||||
|
@ -131,15 +132,15 @@ func TestBackfillRuleIntegration(t *testing.T) {
|
||||||
for selectedSeries.Next() {
|
for selectedSeries.Next() {
|
||||||
seriesCount++
|
seriesCount++
|
||||||
series := selectedSeries.At()
|
series := selectedSeries.At()
|
||||||
if len(series.Labels()) != 3 {
|
if series.Labels().Len() != 3 {
|
||||||
require.Equal(t, 2, len(series.Labels()))
|
require.Equal(t, 2, series.Labels().Len())
|
||||||
x := labels.FromStrings("__name__", "grp2_rule1", "name1", "val1")
|
x := labels.FromStrings("__name__", "grp2_rule1", "name1", "val1")
|
||||||
require.Equal(t, x, series.Labels())
|
require.Equal(t, x, series.Labels())
|
||||||
} else {
|
} else {
|
||||||
require.Equal(t, 3, len(series.Labels()))
|
require.Equal(t, 3, series.Labels().Len())
|
||||||
}
|
}
|
||||||
it := series.Iterator()
|
it := series.Iterator(nil)
|
||||||
for it.Next() {
|
for it.Next() == chunkenc.ValFloat {
|
||||||
samplesCount++
|
samplesCount++
|
||||||
ts, v := it.At()
|
ts, v := it.At()
|
||||||
if v == testValue {
|
if v == testValue {
|
||||||
|
@ -160,7 +161,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func newTestRuleImporter(ctx context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) {
|
func newTestRuleImporter(_ context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) {
|
||||||
logger := log.NewNopLogger()
|
logger := log.NewNopLogger()
|
||||||
cfg := ruleImporterConfig{
|
cfg := ruleImporterConfig{
|
||||||
outputDir: tmpDir,
|
outputDir: tmpDir,
|
||||||
|
|
|
@@ -47,9 +47,15 @@ func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefault
 	}

 	var scrapeConfig *config.ScrapeConfig
+	scfgs, err := cfg.GetScrapeConfigs()
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "Cannot load scrape configs", err)
+		return failureExitCode
+	}
+
 	jobs := []string{}
 	jobMatched := false
-	for _, v := range cfg.ScrapeConfigs {
+	for _, v := range scfgs {
 		jobs = append(jobs, v.JobName)
 		if v.JobName == sdJobName {
 			jobMatched = true
@@ -109,22 +115,22 @@ outerLoop:

 func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig, noDefaultScrapePort bool) []sdCheckResult {
 	sdCheckResults := []sdCheckResult{}
+	lb := labels.NewBuilder(labels.EmptyLabels())
 	for _, targetGroup := range targetGroups {
 		for _, target := range targetGroup.Targets {
-			labelSlice := make([]labels.Label, 0, len(target)+len(targetGroup.Labels))
+			lb.Reset(labels.EmptyLabels())

 			for name, value := range target {
-				labelSlice = append(labelSlice, labels.Label{Name: string(name), Value: string(value)})
+				lb.Set(string(name), string(value))
 			}

 			for name, value := range targetGroup.Labels {
 				if _, ok := target[name]; !ok {
-					labelSlice = append(labelSlice, labels.Label{Name: string(name), Value: string(value)})
+					lb.Set(string(name), string(value))
 				}
 			}

-			targetLabels := labels.New(labelSlice...)
-			res, orig, err := scrape.PopulateLabels(targetLabels, scrapeConfig, noDefaultScrapePort)
+			res, orig, err := scrape.PopulateLabels(lb, scrapeConfig, noDefaultScrapePort)
 			result := sdCheckResult{
 				DiscoveredLabels: orig,
 				Labels:           res,
@ -23,24 +23,25 @@ import (
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
"runtime/pprof"
|
"runtime/pprof"
|
||||||
"sort"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"text/tabwriter"
|
"text/tabwriter"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/prometheus/prometheus/storage"
|
|
||||||
"github.com/prometheus/prometheus/tsdb/index"
|
|
||||||
|
|
||||||
"github.com/alecthomas/units"
|
"github.com/alecthomas/units"
|
||||||
"github.com/go-kit/log"
|
"github.com/go-kit/log"
|
||||||
|
"golang.org/x/exp/slices"
|
||||||
|
|
||||||
"github.com/prometheus/prometheus/model/labels"
|
"github.com/prometheus/prometheus/model/labels"
|
||||||
|
"github.com/prometheus/prometheus/promql/parser"
|
||||||
|
"github.com/prometheus/prometheus/storage"
|
||||||
"github.com/prometheus/prometheus/tsdb"
|
"github.com/prometheus/prometheus/tsdb"
|
||||||
|
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
||||||
"github.com/prometheus/prometheus/tsdb/chunks"
|
"github.com/prometheus/prometheus/tsdb/chunks"
|
||||||
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
|
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
|
||||||
"github.com/prometheus/prometheus/tsdb/fileutil"
|
"github.com/prometheus/prometheus/tsdb/fileutil"
|
||||||
|
"github.com/prometheus/prometheus/tsdb/index"
|
||||||
)
|
)
|
||||||
|
|
||||||
const timeDelta = 30000
|
const timeDelta = 30000
|
||||||
|
@ -314,7 +315,7 @@ func readPrometheusLabels(r io.Reader, n int) ([]labels.Labels, error) {
|
||||||
i := 0
|
i := 0
|
||||||
|
|
||||||
for scanner.Scan() && i < n {
|
for scanner.Scan() && i < n {
|
||||||
m := make(labels.Labels, 0, 10)
|
m := make([]labels.Label, 0, 10)
|
||||||
|
|
||||||
r := strings.NewReplacer("\"", "", "{", "", "}", "")
|
r := strings.NewReplacer("\"", "", "{", "", "}", "")
|
||||||
s := r.Replace(scanner.Text())
|
s := r.Replace(scanner.Text())
|
||||||
|
@ -324,13 +325,12 @@ func readPrometheusLabels(r io.Reader, n int) ([]labels.Labels, error) {
|
||||||
split := strings.Split(labelChunk, ":")
|
split := strings.Split(labelChunk, ":")
|
||||||
m = append(m, labels.Label{Name: split[0], Value: split[1]})
|
m = append(m, labels.Label{Name: split[0], Value: split[1]})
|
||||||
}
|
}
|
||||||
// Order of the k/v labels matters, don't assume we'll always receive them already sorted.
|
ml := labels.New(m...) // This sorts by name - order of the k/v labels matters, don't assume we'll always receive them already sorted.
|
||||||
sort.Sort(m)
|
h := ml.Hash()
|
||||||
h := m.Hash()
|
|
||||||
if _, ok := hashes[h]; ok {
|
if _, ok := hashes[h]; ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
mets = append(mets, m)
|
mets = append(mets, ml)
|
||||||
hashes[h] = struct{}{}
|
hashes[h] = struct{}{}
|
||||||
i++
|
i++
|
||||||
}
|
}
|
||||||
|
@ -397,25 +397,20 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
blocks, err := db.Blocks()
|
|
||||||
|
if blockID == "" {
|
||||||
|
blockID, err = db.LastBlockID()
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := db.Block(blockID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
var block tsdb.BlockReader
|
|
||||||
if blockID != "" {
|
return db, b, nil
|
||||||
for _, b := range blocks {
|
|
||||||
if b.Meta().ULID.String() == blockID {
|
|
||||||
block = b
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if len(blocks) > 0 {
|
|
||||||
block = blocks[len(blocks)-1]
|
|
||||||
}
|
|
||||||
if block == nil {
|
|
||||||
return nil, nil, fmt.Errorf("block %s not found", blockID)
|
|
||||||
}
|
|
||||||
return db, block, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
|
func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
|
||||||
|
@ -451,7 +446,7 @@ func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
|
||||||
postingInfos := []postingInfo{}
|
postingInfos := []postingInfo{}
|
||||||
|
|
||||||
printInfo := func(postingInfos []postingInfo) {
|
printInfo := func(postingInfos []postingInfo) {
|
||||||
sort.Slice(postingInfos, func(i, j int) bool { return postingInfos[i].metric > postingInfos[j].metric })
|
slices.SortFunc(postingInfos, func(a, b postingInfo) bool { return a.metric > b.metric })
|
||||||
|
|
||||||
for i, pc := range postingInfos {
|
for i, pc := range postingInfos {
|
||||||
if i >= limit {
|
if i >= limit {
|
||||||
|
@ -469,21 +464,21 @@ func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
lbls := labels.Labels{}
|
|
||||||
chks := []chunks.Meta{}
|
chks := []chunks.Meta{}
|
||||||
|
builder := labels.ScratchBuilder{}
|
||||||
for p.Next() {
|
for p.Next() {
|
||||||
if err = ir.Series(p.At(), &lbls, &chks); err != nil {
|
if err = ir.Series(p.At(), &builder, &chks); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// Amount of the block time range not covered by this series.
|
// Amount of the block time range not covered by this series.
|
||||||
uncovered := uint64(meta.MaxTime-meta.MinTime) - uint64(chks[len(chks)-1].MaxTime-chks[0].MinTime)
|
uncovered := uint64(meta.MaxTime-meta.MinTime) - uint64(chks[len(chks)-1].MaxTime-chks[0].MinTime)
|
||||||
for _, lbl := range lbls {
|
builder.Labels().Range(func(lbl labels.Label) {
|
||||||
key := lbl.Name + "=" + lbl.Value
|
key := lbl.Name + "=" + lbl.Value
|
||||||
labelsUncovered[lbl.Name] += uncovered
|
labelsUncovered[lbl.Name] += uncovered
|
||||||
labelpairsUncovered[key] += uncovered
|
labelpairsUncovered[key] += uncovered
|
||||||
labelpairsCount[key]++
|
labelpairsCount[key]++
|
||||||
entries++
|
entries++
|
||||||
}
|
})
|
||||||
}
|
}
|
||||||
if p.Err() != nil {
|
if p.Err() != nil {
|
||||||
return p.Err()
|
return p.Err()
|
||||||
|
@ -588,10 +583,10 @@ func analyzeCompaction(block tsdb.BlockReader, indexr tsdb.IndexReader) (err err
|
||||||
nBuckets := 10
|
nBuckets := 10
|
||||||
histogram := make([]int, nBuckets)
|
histogram := make([]int, nBuckets)
|
||||||
totalChunks := 0
|
totalChunks := 0
|
||||||
|
var builder labels.ScratchBuilder
|
||||||
for postingsr.Next() {
|
for postingsr.Next() {
|
||||||
lbsl := labels.Labels{}
|
|
||||||
var chks []chunks.Meta
|
var chks []chunks.Meta
|
||||||
if err := indexr.Series(postingsr.At(), &lbsl, &chks); err != nil {
|
if err := indexr.Series(postingsr.At(), &builder, &chks); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -624,7 +619,7 @@ func analyzeCompaction(block tsdb.BlockReader, indexr tsdb.IndexReader) (err err
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
-func dumpSamples(path string, mint, maxt int64) (err error) {
+func dumpSamples(path string, mint, maxt int64, match string) (err error) {
 	db, err := tsdb.OpenDBReadOnly(path, nil)
 	if err != nil {
 		return err
@@ -638,13 +633,17 @@ func dumpSamples(path string, mint, maxt int64) (err error) {
 	}
 	defer q.Close()

-	ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
+	matchers, err := parser.ParseMetricSelector(match)
+	if err != nil {
+		return err
+	}
+	ss := q.Select(false, nil, matchers...)

 	for ss.Next() {
 		series := ss.At()
 		lbs := series.Labels()
-		it := series.Iterator()
-		for it.Next() {
+		it := series.Iterator(nil)
+		for it.Next() == chunkenc.ValFloat {
 			ts, val := it.At()
 			fmt.Printf("%s %g %d\n", lbs, val, ts)
 		}
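Note (illustrative, not part of this change): the new match argument means `promtool tsdb dump` can restrict output to a PromQL series selector instead of dumping everything. A minimal sketch of how such a selector string becomes the matchers handed to Select, using only the parser call visible above; the selector literal is an example.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	matchers, err := parser.ParseMetricSelector(`{__name__=~"up|go_goroutines", job="prometheus"}`)
	if err != nil {
		panic(err)
	}
	for _, m := range matchers {
		fmt.Println(m.Name, m.Type, m.Value) // each matcher feeds q.Select(false, nil, matchers...)
	}
}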
@ -130,7 +130,7 @@ func resolveAndGlobFilepaths(baseDir string, utf *unitTestFile) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if len(m) <= 0 {
|
if len(m) == 0 {
|
||||||
fmt.Fprintln(os.Stderr, " WARNING: no file match pattern", rf)
|
fmt.Fprintln(os.Stderr, " WARNING: no file match pattern", rf)
|
||||||
}
|
}
|
||||||
globbedFiles = append(globbedFiles, m...)
|
globbedFiles = append(globbedFiles, m...)
|
||||||
|
@ -284,8 +284,8 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
|
||||||
for _, a := range ar.ActiveAlerts() {
|
for _, a := range ar.ActiveAlerts() {
|
||||||
if a.State == rules.StateFiring {
|
if a.State == rules.StateFiring {
|
||||||
alerts = append(alerts, labelAndAnnotation{
|
alerts = append(alerts, labelAndAnnotation{
|
||||||
Labels: append(labels.Labels{}, a.Labels...),
|
Labels: a.Labels.Copy(),
|
||||||
Annotations: append(labels.Labels{}, a.Annotations...),
|
Annotations: a.Annotations.Copy(),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -347,7 +347,7 @@ Outer:
|
||||||
for _, s := range got {
|
for _, s := range got {
|
||||||
gotSamples = append(gotSamples, parsedSample{
|
gotSamples = append(gotSamples, parsedSample{
|
||||||
Labels: s.Metric.Copy(),
|
Labels: s.Metric.Copy(),
|
||||||
Value: s.V,
|
Value: s.F,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -434,7 +434,7 @@ func (tg *testGroup) maxEvalTime() time.Duration {
|
||||||
}
|
}
|
||||||
|
|
||||||
func query(ctx context.Context, qs string, t time.Time, engine *promql.Engine, qu storage.Queryable) (promql.Vector, error) {
|
func query(ctx context.Context, qs string, t time.Time, engine *promql.Engine, qu storage.Queryable) (promql.Vector, error) {
|
||||||
q, err := engine.NewInstantQuery(qu, nil, qs, t)
|
q, err := engine.NewInstantQuery(ctx, qu, nil, qs, t)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -447,7 +447,8 @@ func query(ctx context.Context, qs string, t time.Time, engine *promql.Engine, q
|
||||||
return v, nil
|
return v, nil
|
||||||
case promql.Scalar:
|
case promql.Scalar:
|
||||||
return promql.Vector{promql.Sample{
|
return promql.Vector{promql.Sample{
|
||||||
Point: promql.Point(v),
|
T: v.T,
|
||||||
|
F: v.V,
|
||||||
Metric: labels.Labels{},
|
Metric: labels.Labels{},
|
||||||
}}, nil
|
}}, nil
|
||||||
default:
|
default:
|
||||||
|
|
262
config/config.go
|
@ -34,6 +34,7 @@ import (
|
||||||
"github.com/prometheus/prometheus/discovery"
|
"github.com/prometheus/prometheus/discovery"
|
||||||
"github.com/prometheus/prometheus/model/labels"
|
"github.com/prometheus/prometheus/model/labels"
|
||||||
"github.com/prometheus/prometheus/model/relabel"
|
"github.com/prometheus/prometheus/model/relabel"
|
||||||
|
"github.com/prometheus/prometheus/storage/remote/azuread"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -80,7 +81,8 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro
|
||||||
return cfg, nil
|
return cfg, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, v := range cfg.GlobalConfig.ExternalLabels {
|
b := labels.ScratchBuilder{}
|
||||||
|
cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) {
|
||||||
newV := os.Expand(v.Value, func(s string) string {
|
newV := os.Expand(v.Value, func(s string) string {
|
||||||
if s == "$" {
|
if s == "$" {
|
||||||
return "$"
|
return "$"
|
||||||
|
@ -93,10 +95,10 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro
|
||||||
})
|
})
|
||||||
if newV != v.Value {
|
if newV != v.Value {
|
||||||
level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV)
|
level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV)
|
||||||
v.Value = newV
|
|
||||||
cfg.GlobalConfig.ExternalLabels[i] = v
|
|
||||||
}
|
}
|
||||||
}
|
b.Add(v.Name, newV)
|
||||||
|
})
|
||||||
|
cfg.GlobalConfig.ExternalLabels = b.Labels()
|
||||||
return cfg, nil
|
return cfg, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -112,10 +114,6 @@ func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.
|
||||||
}
|
}
|
||||||
|
|
||||||
if agentMode {
|
if agentMode {
|
||||||
if len(cfg.RemoteWriteConfigs) == 0 {
|
|
||||||
return nil, errors.New("at least one remote_write target must be specified in agent mode")
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(cfg.AlertingConfig.AlertmanagerConfigs) > 0 || len(cfg.AlertingConfig.AlertRelabelConfigs) > 0 {
|
if len(cfg.AlertingConfig.AlertmanagerConfigs) > 0 || len(cfg.AlertingConfig.AlertRelabelConfigs) > 0 {
|
||||||
return nil, errors.New("field alerting is not allowed in agent mode")
|
return nil, errors.New("field alerting is not allowed in agent mode")
|
||||||
}
|
}
|
||||||
|
@ -149,13 +147,14 @@ var (
|
||||||
|
|
||||||
// DefaultScrapeConfig is the default scrape configuration.
|
// DefaultScrapeConfig is the default scrape configuration.
|
||||||
DefaultScrapeConfig = ScrapeConfig{
|
DefaultScrapeConfig = ScrapeConfig{
|
||||||
// ScrapeTimeout and ScrapeInterval default to the
|
// ScrapeTimeout and ScrapeInterval default to the configured
|
||||||
// configured globals.
|
// globals.
|
||||||
MetricsPath: "/metrics",
|
ScrapeClassicHistograms: false,
|
||||||
Scheme: "http",
|
MetricsPath: "/metrics",
|
||||||
HonorLabels: false,
|
Scheme: "http",
|
||||||
HonorTimestamps: true,
|
HonorLabels: false,
|
||||||
HTTPClientConfig: config.DefaultHTTPClientConfig,
|
HonorTimestamps: true,
|
||||||
|
HTTPClientConfig: config.DefaultHTTPClientConfig,
|
||||||
}
|
}
|
||||||
|
|
||||||
// DefaultAlertmanagerConfig is the default alertmanager configuration.
|
// DefaultAlertmanagerConfig is the default alertmanager configuration.
|
||||||
|
@ -176,16 +175,16 @@ var (
|
||||||
|
|
||||||
// DefaultQueueConfig is the default remote queue configuration.
|
// DefaultQueueConfig is the default remote queue configuration.
|
||||||
DefaultQueueConfig = QueueConfig{
|
DefaultQueueConfig = QueueConfig{
|
||||||
// With a maximum of 200 shards, assuming an average of 100ms remote write
|
// With a maximum of 50 shards, assuming an average of 100ms remote write
|
||||||
// time and 500 samples per batch, we will be able to push 1M samples/s.
|
// time and 2000 samples per batch, we will be able to push 1M samples/s.
|
||||||
MaxShards: 200,
|
MaxShards: 50,
|
||||||
MinShards: 1,
|
MinShards: 1,
|
||||||
MaxSamplesPerSend: 500,
|
MaxSamplesPerSend: 2000,
|
||||||
|
|
||||||
// Each shard will have a max of 2500 samples pending in its channel, plus the pending
|
// Each shard will have a max of 10,000 samples pending in its channel, plus the pending
|
||||||
// samples that have been enqueued. Theoretically we should only ever have about 3000 samples
|
// samples that have been enqueued. Theoretically we should only ever have about 12,000 samples
|
||||||
// per shard pending. At 200 shards that's 600k.
|
// per shard pending. At 50 shards that's 600k.
|
||||||
Capacity: 2500,
|
Capacity: 10000,
|
||||||
BatchSendDeadline: model.Duration(5 * time.Second),
|
BatchSendDeadline: model.Duration(5 * time.Second),
|
||||||
|
|
||||||
// Backoff times for retrying a batch of samples on recoverable errors.
|
// Backoff times for retrying a batch of samples on recoverable errors.
|
||||||
|
@ -197,7 +196,7 @@ var (
|
||||||
DefaultMetadataConfig = MetadataConfig{
|
DefaultMetadataConfig = MetadataConfig{
|
||||||
Send: true,
|
Send: true,
|
||||||
SendInterval: model.Duration(1 * time.Minute),
|
SendInterval: model.Duration(1 * time.Minute),
|
||||||
MaxSamplesPerSend: 500,
|
MaxSamplesPerSend: 2000,
|
||||||
}
|
}
|
||||||
|
|
||||||
// DefaultRemoteReadConfig is the default remote read configuration.
|
// DefaultRemoteReadConfig is the default remote read configuration.
|
||||||
|
@ -219,12 +218,13 @@ var (
|
||||||
|
|
||||||
// Config is the top-level configuration for Prometheus's config files.
|
// Config is the top-level configuration for Prometheus's config files.
|
||||||
type Config struct {
|
type Config struct {
|
||||||
GlobalConfig GlobalConfig `yaml:"global"`
|
GlobalConfig GlobalConfig `yaml:"global"`
|
||||||
AlertingConfig AlertingConfig `yaml:"alerting,omitempty"`
|
AlertingConfig AlertingConfig `yaml:"alerting,omitempty"`
|
||||||
RuleFiles []string `yaml:"rule_files,omitempty"`
|
RuleFiles []string `yaml:"rule_files,omitempty"`
|
||||||
ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"`
|
ScrapeConfigFiles []string `yaml:"scrape_config_files,omitempty"`
|
||||||
StorageConfig StorageConfig `yaml:"storage,omitempty"`
|
ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"`
|
||||||
TracingConfig TracingConfig `yaml:"tracing,omitempty"`
|
StorageConfig StorageConfig `yaml:"storage,omitempty"`
|
||||||
|
TracingConfig TracingConfig `yaml:"tracing,omitempty"`
|
||||||
|
|
||||||
RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"`
|
RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"`
|
||||||
RemoteReadConfigs []*RemoteReadConfig `yaml:"remote_read,omitempty"`
|
RemoteReadConfigs []*RemoteReadConfig `yaml:"remote_read,omitempty"`
|
||||||
|
@ -238,6 +238,9 @@ func (c *Config) SetDirectory(dir string) {
|
||||||
for i, file := range c.RuleFiles {
|
for i, file := range c.RuleFiles {
|
||||||
c.RuleFiles[i] = config.JoinDir(dir, file)
|
c.RuleFiles[i] = config.JoinDir(dir, file)
|
||||||
}
|
}
|
||||||
|
for i, file := range c.ScrapeConfigFiles {
|
||||||
|
c.ScrapeConfigFiles[i] = config.JoinDir(dir, file)
|
||||||
|
}
|
||||||
for _, c := range c.ScrapeConfigs {
|
for _, c := range c.ScrapeConfigs {
|
||||||
c.SetDirectory(dir)
|
c.SetDirectory(dir)
|
||||||
}
|
}
|
||||||
|
@@ -257,6 +260,58 @@ func (c Config) String() string {
 	return string(b)
 }
 
+// ScrapeConfigs returns the scrape configurations.
+func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
+	scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs))
+
+	jobNames := map[string]string{}
+	for i, scfg := range c.ScrapeConfigs {
+		// We do these checks for library users that would not call Validate in
+		// Unmarshal.
+		if err := scfg.Validate(c.GlobalConfig); err != nil {
+			return nil, err
+		}
+
+		if _, ok := jobNames[scfg.JobName]; ok {
+			return nil, fmt.Errorf("found multiple scrape configs with job name %q", scfg.JobName)
+		}
+		jobNames[scfg.JobName] = "main config file"
+		scfgs[i] = scfg
+	}
+	for _, pat := range c.ScrapeConfigFiles {
+		fs, err := filepath.Glob(pat)
+		if err != nil {
+			// The only error can be a bad pattern.
+			return nil, fmt.Errorf("error retrieving scrape config files for %q: %w", pat, err)
+		}
+		for _, filename := range fs {
+			cfg := ScrapeConfigs{}
+			content, err := os.ReadFile(filename)
+			if err != nil {
+				return nil, fileErr(filename, err)
+			}
+			err = yaml.UnmarshalStrict(content, &cfg)
+			if err != nil {
+				return nil, fileErr(filename, err)
+			}
+			for _, scfg := range cfg.ScrapeConfigs {
+				if err := scfg.Validate(c.GlobalConfig); err != nil {
+					return nil, fileErr(filename, err)
+				}
+
+				if f, ok := jobNames[scfg.JobName]; ok {
+					return nil, fileErr(filename, fmt.Errorf("found multiple scrape configs with job name %q, first found in %s", scfg.JobName, f))
+				}
+				jobNames[scfg.JobName] = fmt.Sprintf("%q", filePath(filename))
+
+				scfg.SetDirectory(filepath.Dir(filename))
+				scfgs = append(scfgs, scfg)
+			}
+		}
+	}
+	return scfgs, nil
+}
+
 // UnmarshalYAML implements the yaml.Unmarshaler interface.
 func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	*c = DefaultConfig
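
A hedged usage sketch (not part of the commit): a library user loads the main file and then resolves the complete scrape configuration, including jobs pulled in via scrape_config_files, through GetScrapeConfigs. The file name is hypothetical and the logger import is assumed from the tests later in this diff.

package main

import (
	"fmt"

	"github.com/go-kit/log"
	"github.com/prometheus/prometheus/config"
)

func main() {
	// LoadFile(path, agentMode, expandExternalLabels, logger), matching the calls
	// in the test file further down; "prometheus.yml" is a placeholder path.
	cfg, err := config.LoadFile("prometheus.yml", false, false, log.NewNopLogger())
	if err != nil {
		panic(err)
	}
	// GetScrapeConfigs validates every job and merges jobs found in files that
	// match the scrape_config_files patterns.
	scfgs, err := cfg.GetScrapeConfigs()
	if err != nil {
		panic(err)
	}
	for _, sc := range scfgs {
		fmt.Println(sc.JobName, sc.ScrapeInterval)
	}
}
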
@@ -279,26 +334,18 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
 			return fmt.Errorf("invalid rule file path %q", rf)
 		}
 	}
+
+	for _, sf := range c.ScrapeConfigFiles {
+		if !patRulePath.MatchString(sf) {
+			return fmt.Errorf("invalid scrape config file path %q", sf)
+		}
+	}
 
 	// Do global overrides and validate unique names.
 	jobNames := map[string]struct{}{}
 	for _, scfg := range c.ScrapeConfigs {
-		if scfg == nil {
-			return errors.New("empty or null scrape config section")
-		}
-		// First set the correct scrape interval, then check that the timeout
-		// (inferred or explicit) is not greater than that.
-		if scfg.ScrapeInterval == 0 {
-			scfg.ScrapeInterval = c.GlobalConfig.ScrapeInterval
-		}
-		if scfg.ScrapeTimeout > scfg.ScrapeInterval {
-			return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", scfg.JobName)
-		}
-		if scfg.ScrapeTimeout == 0 {
-			if c.GlobalConfig.ScrapeTimeout > scfg.ScrapeInterval {
-				scfg.ScrapeTimeout = scfg.ScrapeInterval
-			} else {
-				scfg.ScrapeTimeout = c.GlobalConfig.ScrapeTimeout
-			}
-		}
+		if err := scfg.Validate(c.GlobalConfig); err != nil {
+			return err
+		}
 
 		if _, ok := jobNames[scfg.JobName]; ok {
@@ -344,6 +391,24 @@ type GlobalConfig struct {
 	QueryLogFile string `yaml:"query_log_file,omitempty"`
 	// The labels to add to any timeseries that this Prometheus instance scrapes.
 	ExternalLabels labels.Labels `yaml:"external_labels,omitempty"`
+	// An uncompressed response body larger than this many bytes will cause the
+	// scrape to fail. 0 means no limit.
+	BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"`
+	// More than this many samples post metric-relabeling will cause the scrape to
+	// fail. 0 means no limit.
+	SampleLimit uint `yaml:"sample_limit,omitempty"`
+	// More than this many targets after the target relabeling will cause the
+	// scrapes to fail. 0 means no limit.
+	TargetLimit uint `yaml:"target_limit,omitempty"`
+	// More than this many labels post metric-relabeling will cause the scrape to
+	// fail. 0 means no limit.
+	LabelLimit uint `yaml:"label_limit,omitempty"`
+	// More than this label name length post metric-relabeling will cause the
+	// scrape to fail. 0 means no limit.
+	LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"`
+	// More than this label value length post metric-relabeling will cause the
+	// scrape to fail. 0 means no limit.
+	LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"`
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -361,13 +426,16 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 		return err
 	}
 
-	for _, l := range gc.ExternalLabels {
+	if err := gc.ExternalLabels.Validate(func(l labels.Label) error {
 		if !model.LabelName(l.Name).IsValid() {
 			return fmt.Errorf("%q is not a valid label name", l.Name)
 		}
 		if !model.LabelValue(l.Value).IsValid() {
 			return fmt.Errorf("%q is not a valid label value", l.Value)
 		}
+		return nil
+	}); err != nil {
+		return err
 	}
 
 	// First set the correct scrape interval, then check that the timeout
@@ -394,13 +462,17 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 
 // isZero returns true iff the global config is the zero value.
 func (c *GlobalConfig) isZero() bool {
-	return c.ExternalLabels == nil &&
+	return c.ExternalLabels.IsEmpty() &&
 		c.ScrapeInterval == 0 &&
 		c.ScrapeTimeout == 0 &&
 		c.EvaluationInterval == 0 &&
 		c.QueryLogFile == ""
 }
 
+type ScrapeConfigs struct {
+	ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"`
+}
+
 // ScrapeConfig configures a scraping unit for Prometheus.
 type ScrapeConfig struct {
 	// The job name to which the job label is set by default.
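
For orientation, a sketch of why the ScrapeConfigs wrapper exists (assumptions: gopkg.in/yaml.v2 as used elsewhere in this file, and an invented inline document): a standalone scrape config file carries only a scrape_configs list, which unmarshals directly into this wrapper.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/config"
	"gopkg.in/yaml.v2"
)

func main() {
	// The shape of a file referenced from scrape_config_files: only scrape_configs.
	data := []byte(`
scrape_configs:
  - job_name: node
    static_configs:
      - targets: ["localhost:9100"]
`)
	var sc config.ScrapeConfigs
	if err := yaml.UnmarshalStrict(data, &sc); err != nil {
		panic(err)
	}
	fmt.Println(len(sc.ScrapeConfigs), sc.ScrapeConfigs[0].JobName) // 1 node
}
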
@@ -415,6 +487,8 @@ type ScrapeConfig struct {
 	ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
 	// The timeout for scraping targets of this config.
 	ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"`
+	// Whether to scrape a classic histogram that is also exposed as a native histogram.
+	ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"`
 	// The HTTP resource path on which to fetch metrics from targets.
 	MetricsPath string `yaml:"metrics_path,omitempty"`
 	// The URL scheme with which to fetch metrics from targets.
@@ -423,20 +497,23 @@ type ScrapeConfig struct {
 	// scrape to fail. 0 means no limit.
 	BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"`
 	// More than this many samples post metric-relabeling will cause the scrape to
-	// fail.
+	// fail. 0 means no limit.
 	SampleLimit uint `yaml:"sample_limit,omitempty"`
 	// More than this many targets after the target relabeling will cause the
-	// scrapes to fail.
+	// scrapes to fail. 0 means no limit.
 	TargetLimit uint `yaml:"target_limit,omitempty"`
 	// More than this many labels post metric-relabeling will cause the scrape to
-	// fail.
+	// fail. 0 means no limit.
 	LabelLimit uint `yaml:"label_limit,omitempty"`
 	// More than this label name length post metric-relabeling will cause the
-	// scrape to fail.
+	// scrape to fail. 0 means no limit.
 	LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"`
 	// More than this label value length post metric-relabeling will cause the
-	// scrape to fail.
+	// scrape to fail. 0 means no limit.
 	LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"`
+	// More than this many buckets in a native histogram will cause the scrape to
+	// fail.
+	NativeHistogramBucketLimit uint `yaml:"native_histogram_bucket_limit,omitempty"`
 
 	// We cannot do proper Go type embedding below as the parser will then parse
 	// values arbitrarily into the overflow maps of further-down types.
@@ -494,6 +571,47 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	return nil
 }
 
+func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
+	if c == nil {
+		return errors.New("empty or null scrape config section")
+	}
+	// First set the correct scrape interval, then check that the timeout
+	// (inferred or explicit) is not greater than that.
+	if c.ScrapeInterval == 0 {
+		c.ScrapeInterval = globalConfig.ScrapeInterval
+	}
+	if c.ScrapeTimeout > c.ScrapeInterval {
+		return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", c.JobName)
+	}
+	if c.ScrapeTimeout == 0 {
+		if globalConfig.ScrapeTimeout > c.ScrapeInterval {
+			c.ScrapeTimeout = c.ScrapeInterval
+		} else {
+			c.ScrapeTimeout = globalConfig.ScrapeTimeout
+		}
+	}
+	if c.BodySizeLimit == 0 {
+		c.BodySizeLimit = globalConfig.BodySizeLimit
+	}
+	if c.SampleLimit == 0 {
+		c.SampleLimit = globalConfig.SampleLimit
+	}
+	if c.TargetLimit == 0 {
+		c.TargetLimit = globalConfig.TargetLimit
+	}
+	if c.LabelLimit == 0 {
+		c.LabelLimit = globalConfig.LabelLimit
+	}
+	if c.LabelNameLengthLimit == 0 {
+		c.LabelNameLengthLimit = globalConfig.LabelNameLengthLimit
+	}
+	if c.LabelValueLengthLimit == 0 {
+		c.LabelValueLengthLimit = globalConfig.LabelValueLengthLimit
+	}
+
+	return nil
+}
+
 // MarshalYAML implements the yaml.Marshaler interface.
 func (c *ScrapeConfig) MarshalYAML() (interface{}, error) {
 	return discovery.MarshalYAMLWithInlineConfigs(c)
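
A small sketch of the inheritance rule implemented by Validate (the values are invented for illustration): a zero-valued limit on the job falls back to the global limit, an explicit value is kept, and the scrape interval and timeout are filled in the same pass.

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/config"
)

func main() {
	global := config.GlobalConfig{
		ScrapeInterval: model.Duration(15 * time.Second),
		ScrapeTimeout:  model.Duration(10 * time.Second),
		SampleLimit:    1500,
		LabelLimit:     30,
	}
	job := config.ScrapeConfig{
		JobName:     "example",
		SampleLimit: 0,  // zero: inherits 1500 from the global section
		LabelLimit:  50, // explicit: kept as-is
	}
	if err := job.Validate(global); err != nil {
		panic(err)
	}
	fmt.Println(job.SampleLimit, job.LabelLimit, job.ScrapeInterval, job.ScrapeTimeout)
	// 1500 50 15s 10s
}
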
@@ -776,12 +894,13 @@ func CheckTargetAddress(address model.LabelValue) error {
 
 // RemoteWriteConfig is the configuration for writing to remote storage.
 type RemoteWriteConfig struct {
 	URL                  *config.URL       `yaml:"url"`
 	RemoteTimeout        model.Duration    `yaml:"remote_timeout,omitempty"`
 	Headers              map[string]string `yaml:"headers,omitempty"`
 	WriteRelabelConfigs  []*relabel.Config `yaml:"write_relabel_configs,omitempty"`
 	Name                 string            `yaml:"name,omitempty"`
 	SendExemplars        bool              `yaml:"send_exemplars,omitempty"`
+	SendNativeHistograms bool              `yaml:"send_native_histograms,omitempty"`
 
 	// We cannot do proper Go type embedding below as the parser will then parse
 	// values arbitrarily into the overflow maps of further-down types.
@@ -789,6 +908,7 @@ type RemoteWriteConfig struct {
 	QueueConfig    QueueConfig            `yaml:"queue_config,omitempty"`
 	MetadataConfig MetadataConfig         `yaml:"metadata_config,omitempty"`
 	SigV4Config    *sigv4.SigV4Config     `yaml:"sigv4,omitempty"`
+	AzureADConfig  *azuread.AzureADConfig `yaml:"azuread,omitempty"`
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -825,8 +945,12 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
 	httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil ||
 		c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil
 
-	if httpClientConfigAuthEnabled && c.SigV4Config != nil {
-		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
+	if httpClientConfigAuthEnabled && (c.SigV4Config != nil || c.AzureADConfig != nil) {
+		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
+	}
+
+	if c.SigV4Config != nil && c.AzureADConfig != nil {
+		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
 	}
 
 	return nil
@@ -847,7 +971,7 @@ func validateHeadersForTracing(headers map[string]string) error {
 func validateHeaders(headers map[string]string) error {
 	for header := range headers {
 		if strings.ToLower(header) == "authorization" {
-			return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter")
+			return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter")
 		}
 		if _, ok := reservedHeaders[strings.ToLower(header)]; ok {
 			return fmt.Errorf("%s is a reserved header. It must not be changed", header)
@@ -935,3 +1059,15 @@ func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(interface{}) error) erro
 	// Thus we just do its validation here.
 	return c.HTTPClientConfig.Validate()
 }
+
+func filePath(filename string) string {
+	absPath, err := filepath.Abs(filename)
+	if err != nil {
+		return filename
+	}
+	return absPath
+}
+
+func fileErr(filename string, err error) error {
+	return fmt.Errorf("%q: %w", filePath(filename), err)
+}
@@ -47,6 +47,7 @@ import (
 	"github.com/prometheus/prometheus/discovery/moby"
 	"github.com/prometheus/prometheus/discovery/nomad"
 	"github.com/prometheus/prometheus/discovery/openstack"
+	"github.com/prometheus/prometheus/discovery/ovhcloud"
 	"github.com/prometheus/prometheus/discovery/puppetdb"
 	"github.com/prometheus/prometheus/discovery/scaleway"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -67,6 +68,15 @@ func mustParseURL(u string) *config.URL {
 	return &config.URL{URL: parsed}
 }
 
+const (
+	globBodySizeLimit         = 15 * units.MiB
+	globSampleLimit           = 1500
+	globTargetLimit           = 30
+	globLabelLimit            = 30
+	globLabelNameLengthLimit  = 200
+	globLabelValueLengthLimit = 200
+)
+
 var expectedConf = &Config{
 	GlobalConfig: GlobalConfig{
 		ScrapeInterval: model.Duration(15 * time.Second),
@@ -75,6 +85,13 @@ var expectedConf = &Config{
 		QueryLogFile: "",
 
 		ExternalLabels: labels.FromStrings("foo", "bar", "monitor", "codelab"),
+
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
 	},
 
 	RuleFiles: []string{
@@ -164,10 +181,16 @@ var expectedConf = &Config{
 		{
 			JobName: "prometheus",
 
 			HonorLabels:     true,
 			HonorTimestamps: true,
 			ScrapeInterval:  model.Duration(15 * time.Second),
 			ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -216,36 +239,59 @@ var expectedConf = &Config{
 					Regex:        relabel.MustNewRegexp("(.*)some-[regex]"),
 					Replacement:  "foo-${1}",
 					Action:       relabel.Replace,
-				}, {
+				},
+				{
 					SourceLabels: model.LabelNames{"abc"},
 					TargetLabel:  "cde",
 					Separator:    ";",
 					Regex:        relabel.DefaultRelabelConfig.Regex,
 					Replacement:  relabel.DefaultRelabelConfig.Replacement,
 					Action:       relabel.Replace,
-				}, {
+				},
+				{
 					TargetLabel: "abc",
 					Separator:   ";",
 					Regex:       relabel.DefaultRelabelConfig.Regex,
 					Replacement: "static",
 					Action:      relabel.Replace,
-				}, {
+				},
+				{
 					TargetLabel: "abc",
 					Separator:   ";",
 					Regex:       relabel.MustNewRegexp(""),
 					Replacement: "static",
 					Action:      relabel.Replace,
 				},
+				{
+					SourceLabels: model.LabelNames{"foo"},
+					TargetLabel:  "abc",
+					Action:       relabel.KeepEqual,
+					Regex:        relabel.DefaultRelabelConfig.Regex,
+					Replacement:  relabel.DefaultRelabelConfig.Replacement,
+					Separator:    relabel.DefaultRelabelConfig.Separator,
+				},
+				{
+					SourceLabels: model.LabelNames{"foo"},
+					TargetLabel:  "abc",
+					Action:       relabel.DropEqual,
+					Regex:        relabel.DefaultRelabelConfig.Regex,
+					Replacement:  relabel.DefaultRelabelConfig.Replacement,
+					Separator:    relabel.DefaultRelabelConfig.Separator,
+				},
 			},
 		},
 		{
 			JobName: "service-x",
 
 			HonorTimestamps: true,
 			ScrapeInterval:  model.Duration(50 * time.Second),
 			ScrapeTimeout:   model.Duration(5 * time.Second),
 			BodySizeLimit:   10 * units.MiB,
 			SampleLimit:     1000,
+			TargetLimit:           35,
+			LabelLimit:            35,
+			LabelNameLengthLimit:  210,
+			LabelValueLengthLimit: 210,
 
 			HTTPClientConfig: config.HTTPClientConfig{
 				BasicAuth: &config.BasicAuth{
@@ -332,9 +378,15 @@ var expectedConf = &Config{
 			JobName: "service-y",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -343,6 +395,7 @@ var expectedConf = &Config{
 			ServiceDiscoveryConfigs: discovery.Configs{
 				&consul.SDConfig{
 					Server:      "localhost:1234",
+					PathPrefix:  "/consul",
 					Token:       "mysecret",
 					Services:    []string{"nginx", "cache", "mysql"},
 					ServiceTags: []string{"canary", "v1"},
@@ -378,9 +431,15 @@ var expectedConf = &Config{
 			JobName: "service-z",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -403,9 +462,15 @@ var expectedConf = &Config{
 			JobName: "service-kubernetes",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -434,9 +499,15 @@ var expectedConf = &Config{
 			JobName: "service-kubernetes-namespaces",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -465,9 +536,15 @@ var expectedConf = &Config{
 			JobName: "service-kuma",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -485,9 +562,15 @@ var expectedConf = &Config{
 			JobName: "service-marathon",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -514,9 +597,15 @@ var expectedConf = &Config{
 			JobName: "service-nomad",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -540,9 +629,15 @@ var expectedConf = &Config{
 			JobName: "service-ec2",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -566,15 +661,22 @@ var expectedConf = &Config{
 							Values: []string{"web", "db"},
 						},
 					},
+					HTTPClientConfig: config.DefaultHTTPClientConfig,
 				},
 			},
 		},
 		{
 			JobName: "service-lightsail",
 
 			HonorTimestamps: true,
 			ScrapeInterval:  model.Duration(15 * time.Second),
 			ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -582,21 +684,28 @@ var expectedConf = &Config{
 
 			ServiceDiscoveryConfigs: discovery.Configs{
 				&aws.LightsailSDConfig{
 					Region:          "us-east-1",
 					AccessKey:       "access",
 					SecretKey:       "mysecret",
 					Profile:         "profile",
 					RefreshInterval: model.Duration(60 * time.Second),
 					Port:            80,
+					HTTPClientConfig: config.DefaultHTTPClientConfig,
 				},
 			},
 		},
 		{
 			JobName: "service-azure",
 
 			HonorTimestamps: true,
 			ScrapeInterval:  model.Duration(15 * time.Second),
 			ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -620,9 +729,15 @@ var expectedConf = &Config{
 			JobName: "service-nerve",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -639,9 +754,15 @@ var expectedConf = &Config{
 			JobName: "0123service-xxx",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -661,9 +782,15 @@ var expectedConf = &Config{
 			JobName: "badfederation",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -683,9 +810,15 @@ var expectedConf = &Config{
 			JobName: "測試",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -705,9 +838,15 @@ var expectedConf = &Config{
 			JobName: "httpsd",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -724,9 +863,15 @@ var expectedConf = &Config{
 			JobName: "service-triton",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -751,9 +896,15 @@ var expectedConf = &Config{
 			JobName: "digitalocean-droplets",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -777,9 +928,15 @@ var expectedConf = &Config{
 			JobName: "docker",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -799,9 +956,15 @@ var expectedConf = &Config{
 			JobName: "dockerswarm",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -821,9 +984,15 @@ var expectedConf = &Config{
 			JobName: "service-openstack",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -847,9 +1016,15 @@ var expectedConf = &Config{
 			JobName: "service-puppetdb",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -875,10 +1050,16 @@ var expectedConf = &Config{
 			JobName: "hetzner",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -924,9 +1105,15 @@ var expectedConf = &Config{
 			JobName: "service-eureka",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -940,12 +1127,55 @@ var expectedConf = &Config{
 			},
 		},
+		{
+			JobName: "ovhcloud",
+
+			HonorTimestamps: true,
+			ScrapeInterval:  model.Duration(15 * time.Second),
+			ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
+
+			HTTPClientConfig: config.DefaultHTTPClientConfig,
+			MetricsPath:      DefaultScrapeConfig.MetricsPath,
+			Scheme:           DefaultScrapeConfig.Scheme,
+
+			ServiceDiscoveryConfigs: discovery.Configs{
+				&ovhcloud.SDConfig{
+					Endpoint:          "ovh-eu",
+					ApplicationKey:    "testAppKey",
+					ApplicationSecret: "testAppSecret",
+					ConsumerKey:       "testConsumerKey",
+					RefreshInterval:   model.Duration(60 * time.Second),
+					Service:           "vps",
+				},
+				&ovhcloud.SDConfig{
+					Endpoint:          "ovh-eu",
+					ApplicationKey:    "testAppKey",
+					ApplicationSecret: "testAppSecret",
+					ConsumerKey:       "testConsumerKey",
+					RefreshInterval:   model.Duration(60 * time.Second),
+					Service:           "dedicated_server",
+				},
+			},
+		},
 		{
 			JobName: "scaleway",
 
 			HonorTimestamps: true,
 			ScrapeInterval:  model.Duration(15 * time.Second),
 			ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
 
 			HTTPClientConfig: config.DefaultHTTPClientConfig,
 			MetricsPath:      DefaultScrapeConfig.MetricsPath,
 			Scheme:           DefaultScrapeConfig.Scheme,
@@ -978,9 +1208,15 @@ var expectedConf = &Config{
 			JobName: "linode-instances",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -1005,9 +1241,16 @@ var expectedConf = &Config{
 			JobName: "uyuni",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -1024,10 +1267,16 @@ var expectedConf = &Config{
 			JobName: "ionos",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -1049,9 +1298,15 @@ var expectedConf = &Config{
 			JobName: "vultr",
+			BodySizeLimit:         globBodySizeLimit,
+			SampleLimit:           globSampleLimit,
+			TargetLimit:           globTargetLimit,
+			LabelLimit:            globLabelLimit,
+			LabelNameLengthLimit:  globLabelNameLengthLimit,
+			LabelValueLengthLimit: globLabelValueLengthLimit,
@@ -1175,7 +1430,7 @@ func TestElideSecrets(t *testing.T) {
 	yamlConfig := string(config)
 
 	matches := secretRe.FindAllStringIndex(yamlConfig, -1)
-	require.Equal(t, 18, len(matches), "wrong number of secret matches found")
+	require.Equal(t, 22, len(matches), "wrong number of secret matches found")
 	require.NotContains(t, yamlConfig, "mysecret",
 		"yaml marshal reveals authentication credentials.")
 }
@@ -1286,6 +1541,22 @@ var expectedErrors = []struct {
 	{
 		filename: "labeldrop5.bad.yml",
 		errMsg:   "labeldrop action requires only 'regex', and no other fields",
 	},
+	{
+		filename: "dropequal.bad.yml",
+		errMsg:   "relabel configuration for dropequal action requires 'target_label' value",
+	},
+	{
+		filename: "dropequal1.bad.yml",
+		errMsg:   "dropequal action requires only 'source_labels' and `target_label`, and no other fields",
+	},
+	{
+		filename: "keepequal.bad.yml",
+		errMsg:   "relabel configuration for keepequal action requires 'target_label' value",
+	},
+	{
+		filename: "keepequal1.bad.yml",
+		errMsg:   "keepequal action requires only 'source_labels' and `target_label`, and no other fields",
+	},
 	{
 		filename: "labelmap.bad.yml",
 		errMsg:   "\"l-$1\" is invalid 'replacement' for labelmap action",
@@ -1456,7 +1727,7 @@ var expectedErrors = []struct {
 	},
 	{
 		filename: "remote_write_authorization_header.bad.yml",
-		errMsg:   `authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter`,
+		errMsg:   `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter`,
 	},
 	{
 		filename: "remote_write_url_missing.bad.yml",
@@ -1618,6 +1889,18 @@ var expectedErrors = []struct {
 	{
 		filename: "ionos_datacenter.bad.yml",
 		errMsg:   "datacenter id can't be empty",
 	},
+	{
+		filename: "ovhcloud_no_secret.bad.yml",
+		errMsg:   "application secret can not be empty",
+	},
+	{
+		filename: "ovhcloud_bad_service.bad.yml",
+		errMsg:   "unknown service: fakeservice",
+	},
+	{
+		filename: "scrape_config_files_glob.bad.yml",
+		errMsg:   `parsing YAML file testdata/scrape_config_files_glob.bad.yml: invalid scrape config file path "scrape_configs/*/*"`,
+	},
 }
 
 func TestBadConfigs(t *testing.T) {
|
||||||
require.Equal(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
|
require.Equal(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAgentMode(t *testing.T) {
|
||||||
|
_, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, false, log.NewNopLogger())
|
||||||
|
require.ErrorContains(t, err, "field alerting is not allowed in agent mode")
|
||||||
|
|
||||||
|
_, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, false, log.NewNopLogger())
|
||||||
|
require.ErrorContains(t, err, "field alerting is not allowed in agent mode")
|
||||||
|
|
||||||
|
_, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, false, log.NewNopLogger())
|
||||||
|
require.ErrorContains(t, err, "field rule_files is not allowed in agent mode")
|
||||||
|
|
||||||
|
_, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, false, log.NewNopLogger())
|
||||||
|
require.ErrorContains(t, err, "field remote_read is not allowed in agent mode")
|
||||||
|
|
||||||
|
c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, false, log.NewNopLogger())
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Len(t, c.RemoteWriteConfigs, 0)
|
||||||
|
|
||||||
|
c, err = LoadFile("testdata/agent_mode.good.yml", true, false, log.NewNopLogger())
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Len(t, c.RemoteWriteConfigs, 1)
|
||||||
|
require.Equal(
|
||||||
|
t,
|
||||||
|
"http://remote1/push",
|
||||||
|
c.RemoteWriteConfigs[0].URL.String(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
func TestEmptyGlobalBlock(t *testing.T) {
|
func TestEmptyGlobalBlock(t *testing.T) {
|
||||||
c, err := Load("global:\n", false, log.NewNopLogger())
|
c, err := Load("global:\n", false, log.NewNopLogger())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -1677,6 +1987,156 @@ func TestEmptyGlobalBlock(t *testing.T) {
|
||||||
require.Equal(t, exp, *c)
|
require.Equal(t, exp, *c)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestGetScrapeConfigs(t *testing.T) {
|
||||||
|
sc := func(jobName string, scrapeInterval, scrapeTimeout model.Duration) *ScrapeConfig {
|
||||||
|
return &ScrapeConfig{
|
||||||
|
JobName: jobName,
|
||||||
|
HonorTimestamps: true,
|
||||||
|
ScrapeInterval: scrapeInterval,
|
||||||
|
ScrapeTimeout: scrapeTimeout,
|
||||||
|
MetricsPath: "/metrics",
|
||||||
|
Scheme: "http",
|
||||||
|
HTTPClientConfig: config.DefaultHTTPClientConfig,
|
||||||
|
ServiceDiscoveryConfigs: discovery.Configs{
|
||||||
|
discovery.StaticConfig{
|
||||||
|
{
|
||||||
|
Targets: []model.LabelSet{
|
||||||
|
{
|
||||||
|
model.AddressLabel: "localhost:8080",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Source: "0",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
configFile string
|
||||||
|
expectedResult []*ScrapeConfig
|
||||||
|
expectedError string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "An included config file should be a valid global config.",
|
||||||
|
configFile: "testdata/scrape_config_files.good.yml",
|
||||||
|
expectedResult: []*ScrapeConfig{sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second))},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name:            "A global config that only includes a scrape config file.",
|
||||||
|
configFile: "testdata/scrape_config_files_only.good.yml",
|
||||||
|
expectedResult: []*ScrapeConfig{sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second))},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name:            "A global config that combines scrape config files and scrape configs.",
|
||||||
|
configFile: "testdata/scrape_config_files_combined.good.yml",
|
||||||
|
expectedResult: []*ScrapeConfig{
|
||||||
|
sc("node", model.Duration(60*time.Second), model.Duration(10*time.Second)),
|
||||||
|
sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second)),
|
||||||
|
sc("alertmanager", model.Duration(60*time.Second), model.Duration(10*time.Second)),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name:            "A global config that includes a scrape config file with globs.",
|
||||||
|
configFile: "testdata/scrape_config_files_glob.good.yml",
|
||||||
|
expectedResult: []*ScrapeConfig{
|
||||||
|
{
|
||||||
|
JobName: "prometheus",
|
||||||
|
|
||||||
|
HonorTimestamps: true,
|
||||||
|
ScrapeInterval: model.Duration(60 * time.Second),
|
||||||
|
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
|
||||||
|
|
||||||
|
MetricsPath: DefaultScrapeConfig.MetricsPath,
|
||||||
|
Scheme: DefaultScrapeConfig.Scheme,
|
||||||
|
|
||||||
|
HTTPClientConfig: config.HTTPClientConfig{
|
||||||
|
TLSConfig: config.TLSConfig{
|
||||||
|
CertFile: filepath.FromSlash("testdata/scrape_configs/valid_cert_file"),
|
||||||
|
KeyFile: filepath.FromSlash("testdata/scrape_configs/valid_key_file"),
|
||||||
|
},
|
||||||
|
FollowRedirects: true,
|
||||||
|
EnableHTTP2: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
ServiceDiscoveryConfigs: discovery.Configs{
|
||||||
|
discovery.StaticConfig{
|
||||||
|
{
|
||||||
|
Targets: []model.LabelSet{
|
||||||
|
{model.AddressLabel: "localhost:8080"},
|
||||||
|
},
|
||||||
|
Source: "0",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
JobName: "node",
|
||||||
|
|
||||||
|
HonorTimestamps: true,
|
||||||
|
ScrapeInterval: model.Duration(15 * time.Second),
|
||||||
|
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
|
||||||
|
HTTPClientConfig: config.HTTPClientConfig{
|
||||||
|
TLSConfig: config.TLSConfig{
|
||||||
|
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
|
||||||
|
KeyFile: filepath.FromSlash("testdata/valid_key_file"),
|
||||||
|
},
|
||||||
|
FollowRedirects: true,
|
||||||
|
EnableHTTP2: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
MetricsPath: DefaultScrapeConfig.MetricsPath,
|
||||||
|
Scheme: DefaultScrapeConfig.Scheme,
|
||||||
|
|
||||||
|
ServiceDiscoveryConfigs: discovery.Configs{
|
||||||
|
&vultr.SDConfig{
|
||||||
|
HTTPClientConfig: config.HTTPClientConfig{
|
||||||
|
Authorization: &config.Authorization{
|
||||||
|
Type: "Bearer",
|
||||||
|
Credentials: "abcdef",
|
||||||
|
},
|
||||||
|
FollowRedirects: true,
|
||||||
|
EnableHTTP2: true,
|
||||||
|
},
|
||||||
|
Port: 80,
|
||||||
|
RefreshInterval: model.Duration(60 * time.Second),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name:            "A global config that includes the same scrape config file twice.",
|
||||||
|
configFile: "testdata/scrape_config_files_double_import.bad.yml",
|
||||||
|
expectedError: `found multiple scrape configs with job name "prometheus"`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name:            "A global config that includes a scrape config identical to one in the main file.",
|
||||||
|
configFile: "testdata/scrape_config_files_duplicate.bad.yml",
|
||||||
|
expectedError: `found multiple scrape configs with job name "prometheus"`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name:            "A global config that includes a scrape config file with errors.",
|
||||||
|
configFile: "testdata/scrape_config_files_global.bad.yml",
|
||||||
|
expectedError: `scrape timeout greater than scrape interval for scrape config with job name "prometheus"`,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
c, err := LoadFile(tc.configFile, false, false, log.NewNopLogger())
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
scfgs, err := c.GetScrapeConfigs()
|
||||||
|
if len(tc.expectedError) > 0 {
|
||||||
|
require.ErrorContains(t, err, tc.expectedError)
|
||||||
|
}
|
||||||
|
require.Equal(t, tc.expectedResult, scfgs)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
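The glob test case in TestGetScrapeConfigs relies on two pieces working together: the main file lists only scrape_config_files patterns, and each matched file contributes its own scrape_configs. A sketch of that layout, using the testdata paths from the table (file contents abridged; the full fixtures appear further down in this change):

# testdata/scrape_config_files_glob.good.yml
scrape_config_files:
  - scrape_configs/*.yml

# testdata/scrape_configs/scrape_config_files1.good.yml (matched by the glob)
scrape_configs:
  - job_name: prometheus
    static_configs:
      - targets: ['localhost:8080']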
||||||
func kubernetesSDHostURL() config.URL {
|
func kubernetesSDHostURL() config.URL {
|
||||||
tURL, _ := url.Parse("https://localhost:1234")
|
tURL, _ := url.Parse("https://localhost:1234")
|
||||||
return config.URL{URL: tURL}
|
return config.URL{URL: tURL}
|
||||||
|
|
2
config/testdata/agent_mode.good.yml
vendored
Normal file
2
config/testdata/agent_mode.good.yml
vendored
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
remote_write:
|
||||||
|
- url: http://remote1/push
|
6
config/testdata/agent_mode.with_alert_manager.yml
vendored
Normal file
6
config/testdata/agent_mode.with_alert_manager.yml
vendored
Normal file
|
@ -0,0 +1,6 @@
|
||||||
|
alerting:
|
||||||
|
alertmanagers:
|
||||||
|
- scheme: https
|
||||||
|
static_configs:
|
||||||
|
- targets:
|
||||||
|
- "1.2.3.4:9093"
|
5
config/testdata/agent_mode.with_alert_relabels.yml
vendored
Normal file
5
config/testdata/agent_mode.with_alert_relabels.yml
vendored
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
alerting:
|
||||||
|
alert_relabel_configs:
|
||||||
|
- action: uppercase
|
||||||
|
source_labels: [instance]
|
||||||
|
target_label: instance
|
5
config/testdata/agent_mode.with_remote_reads.yml
vendored
Normal file
5
config/testdata/agent_mode.with_remote_reads.yml
vendored
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
remote_read:
|
||||||
|
- url: http://remote1/read
|
||||||
|
read_recent: true
|
||||||
|
name: default
|
||||||
|
enable_http2: false
|
3
config/testdata/agent_mode.with_rule_files.yml
vendored
Normal file
3
config/testdata/agent_mode.with_rule_files.yml
vendored
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
rule_files:
|
||||||
|
- "first.rules"
|
||||||
|
- "my/*.rules"
|
2
config/testdata/agent_mode.without_remote_writes.yml
vendored
Normal file
2
config/testdata/agent_mode.without_remote_writes.yml
vendored
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
global:
|
||||||
|
scrape_interval: 15s
|
33
config/testdata/conf.good.yml
vendored
33
config/testdata/conf.good.yml
vendored
|
@ -2,6 +2,12 @@
|
||||||
global:
|
global:
|
||||||
scrape_interval: 15s
|
scrape_interval: 15s
|
||||||
evaluation_interval: 30s
|
evaluation_interval: 30s
|
||||||
|
body_size_limit: 15MB
|
||||||
|
sample_limit: 1500
|
||||||
|
target_limit: 30
|
||||||
|
label_limit: 30
|
||||||
|
label_name_length_limit: 200
|
||||||
|
label_value_length_limit: 200
|
||||||
# scrape_timeout is set to the global default (10s).
|
# scrape_timeout is set to the global default (10s).
|
||||||
|
|
||||||
external_labels:
|
external_labels:
|
||||||
|
@ -87,6 +93,12 @@ scrape_configs:
|
||||||
- regex:
|
- regex:
|
||||||
replacement: static
|
replacement: static
|
||||||
target_label: abc
|
target_label: abc
|
||||||
|
- source_labels: [foo]
|
||||||
|
target_label: abc
|
||||||
|
action: keepequal
|
||||||
|
- source_labels: [foo]
|
||||||
|
target_label: abc
|
||||||
|
action: dropequal
|
||||||
|
|
||||||
authorization:
|
authorization:
|
||||||
credentials_file: valid_token_file
|
credentials_file: valid_token_file
|
||||||
|
@ -105,6 +117,11 @@ scrape_configs:
|
||||||
|
|
||||||
body_size_limit: 10MB
|
body_size_limit: 10MB
|
||||||
sample_limit: 1000
|
sample_limit: 1000
|
||||||
|
target_limit: 35
|
||||||
|
label_limit: 35
|
||||||
|
label_name_length_limit: 210
|
||||||
|
label_value_length_limit: 210
|
||||||
|
|
||||||
|
|
||||||
metrics_path: /my_path
|
metrics_path: /my_path
|
||||||
scheme: https
|
scheme: https
|
||||||
|
@ -145,6 +162,7 @@ scrape_configs:
|
||||||
consul_sd_configs:
|
consul_sd_configs:
|
||||||
- server: "localhost:1234"
|
- server: "localhost:1234"
|
||||||
token: mysecret
|
token: mysecret
|
||||||
|
path_prefix: /consul
|
||||||
services: ["nginx", "cache", "mysql"]
|
services: ["nginx", "cache", "mysql"]
|
||||||
tags: ["canary", "v1"]
|
tags: ["canary", "v1"]
|
||||||
node_meta:
|
node_meta:
|
||||||
|
@ -349,6 +367,21 @@ scrape_configs:
|
||||||
eureka_sd_configs:
|
eureka_sd_configs:
|
||||||
- server: "http://eureka.example.com:8761/eureka"
|
- server: "http://eureka.example.com:8761/eureka"
|
||||||
|
|
||||||
|
- job_name: ovhcloud
|
||||||
|
ovhcloud_sd_configs:
|
||||||
|
- service: vps
|
||||||
|
endpoint: ovh-eu
|
||||||
|
application_key: testAppKey
|
||||||
|
application_secret: testAppSecret
|
||||||
|
consumer_key: testConsumerKey
|
||||||
|
refresh_interval: 1m
|
||||||
|
- service: dedicated_server
|
||||||
|
endpoint: ovh-eu
|
||||||
|
application_key: testAppKey
|
||||||
|
application_secret: testAppSecret
|
||||||
|
consumer_key: testConsumerKey
|
||||||
|
refresh_interval: 1m
|
||||||
|
|
||||||
- job_name: scaleway
|
- job_name: scaleway
|
||||||
scaleway_sd_configs:
|
scaleway_sd_configs:
|
||||||
- role: instance
|
- role: instance
|
||||||
|
|
5
config/testdata/dropequal.bad.yml
vendored
Normal file
5
config/testdata/dropequal.bad.yml
vendored
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: prometheus
|
||||||
|
relabel_configs:
|
||||||
|
- source_labels: [abcdef]
|
||||||
|
action: dropequal
|
7
config/testdata/dropequal1.bad.yml
vendored
Normal file
7
config/testdata/dropequal1.bad.yml
vendored
Normal file
|
@ -0,0 +1,7 @@
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: prometheus
|
||||||
|
relabel_configs:
|
||||||
|
- source_labels: [abcdef]
|
||||||
|
action: dropequal
|
||||||
|
regex: foo
|
||||||
|
target_label: bar
|
5
config/testdata/keepequal.bad.yml
vendored
Normal file
5
config/testdata/keepequal.bad.yml
vendored
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: prometheus
|
||||||
|
relabel_configs:
|
||||||
|
- source_labels: [abcdef]
|
||||||
|
action: keepequal
|
7
config/testdata/keepequal1.bad.yml
vendored
Normal file
7
config/testdata/keepequal1.bad.yml
vendored
Normal file
|
@ -0,0 +1,7 @@
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: prometheus
|
||||||
|
relabel_configs:
|
||||||
|
- source_labels: [abcdef]
|
||||||
|
action: keepequal
|
||||||
|
regex: foo
|
||||||
|
target_label: bar
|
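The four .bad fixtures above probe the validation added for the new keepequal and dropequal relabel actions: the first of each pair omits target_label, the second sets a regex, and both are rejected. Judging from those fixtures and the rules added to conf.good.yml, a rule that should pass validation looks like this sketch (label names are illustrative):

scrape_configs:
  - job_name: prometheus
    relabel_configs:
      - source_labels: [abcdef]
        target_label: bar
        action: keepequal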
8
config/testdata/ovhcloud_bad_service.bad.yml
vendored
Normal file
8
config/testdata/ovhcloud_bad_service.bad.yml
vendored
Normal file
|
@ -0,0 +1,8 @@
|
||||||
|
scrape_configs:
|
||||||
|
- ovhcloud_sd_configs:
|
||||||
|
- service: fakeservice
|
||||||
|
endpoint: ovh-eu
|
||||||
|
application_key: testAppKey
|
||||||
|
application_secret: testAppSecret
|
||||||
|
consumer_key: testConsumerKey
|
||||||
|
refresh_interval: 1m
|
7
config/testdata/ovhcloud_no_secret.bad.yml
vendored
Normal file
7
config/testdata/ovhcloud_no_secret.bad.yml
vendored
Normal file
|
@ -0,0 +1,7 @@
|
||||||
|
scrape_configs:
|
||||||
|
- ovhcloud_sd_configs:
|
||||||
|
- service: dedicated_server
|
||||||
|
endpoint: ovh-eu
|
||||||
|
application_key: testAppKey
|
||||||
|
consumer_key: testConsumerKey
|
||||||
|
refresh_interval: 1m
|
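For contrast with the two failing OVHcloud fixtures above, the ovhcloud entries added to conf.good.yml show the shape validation expects; the no-secret case is fixed simply by supplying application_secret. A sketch with placeholder credentials:

scrape_configs:
  - job_name: ovhcloud
    ovhcloud_sd_configs:
      - service: dedicated_server
        endpoint: ovh-eu
        application_key: testAppKey
        application_secret: testAppSecret
        consumer_key: testConsumerKey
        refresh_interval: 1m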
6
config/testdata/scrape_config_files.bad.yml
vendored
Normal file
6
config/testdata/scrape_config_files.bad.yml
vendored
Normal file
|
@ -0,0 +1,6 @@
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: prometheus
|
||||||
|
scrape_interval: 10s
|
||||||
|
scrape_timeout: 20s
|
||||||
|
static_configs:
|
||||||
|
- targets: ['localhost:8080']
|
4
config/testdata/scrape_config_files.good.yml
vendored
Normal file
4
config/testdata/scrape_config_files.good.yml
vendored
Normal file
|
@ -0,0 +1,4 @@
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: prometheus
|
||||||
|
static_configs:
|
||||||
|
- targets: ['localhost:8080']
|
4
config/testdata/scrape_config_files2.good.yml
vendored
Normal file
4
config/testdata/scrape_config_files2.good.yml
vendored
Normal file
|
@ -0,0 +1,4 @@
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: alertmanager
|
||||||
|
static_configs:
|
||||||
|
- targets: ['localhost:8080']
|
7
config/testdata/scrape_config_files_combined.good.yml
vendored
Normal file
7
config/testdata/scrape_config_files_combined.good.yml
vendored
Normal file
|
@ -0,0 +1,7 @@
|
||||||
|
scrape_config_files:
|
||||||
|
- scrape_config_files.good.yml
|
||||||
|
- scrape_config_files2.good.yml
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: node
|
||||||
|
static_configs:
|
||||||
|
- targets: ['localhost:8080']
|
3
config/testdata/scrape_config_files_double_import.bad.yml
vendored
Normal file
3
config/testdata/scrape_config_files_double_import.bad.yml
vendored
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
scrape_config_files:
|
||||||
|
- scrape_config_files.good.yml
|
||||||
|
- scrape_config_files.good.yml
|
6
config/testdata/scrape_config_files_duplicate.bad.yml
vendored
Normal file
6
config/testdata/scrape_config_files_duplicate.bad.yml
vendored
Normal file
|
@ -0,0 +1,6 @@
|
||||||
|
scrape_config_files:
|
||||||
|
- scrape_config_files.good.yml
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: prometheus
|
||||||
|
static_configs:
|
||||||
|
- targets: ['localhost:8080']
|
6
config/testdata/scrape_config_files_glob.bad.yml
vendored
Normal file
6
config/testdata/scrape_config_files_glob.bad.yml
vendored
Normal file
|
@ -0,0 +1,6 @@
|
||||||
|
scrape_config_files:
|
||||||
|
- scrape_configs/*/*
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: node
|
||||||
|
static_configs:
|
||||||
|
- targets: ['localhost:8080']
|
2
config/testdata/scrape_config_files_glob.good.yml
vendored
Normal file
2
config/testdata/scrape_config_files_glob.good.yml
vendored
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
scrape_config_files:
|
||||||
|
- scrape_configs/*.yml
|
2
config/testdata/scrape_config_files_global.bad.yml
vendored
Normal file
2
config/testdata/scrape_config_files_global.bad.yml
vendored
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
scrape_config_files:
|
||||||
|
- scrape_config_files.bad.yml
|
11
config/testdata/scrape_config_files_global_duplicate.bad.yml
vendored
Normal file
11
config/testdata/scrape_config_files_global_duplicate.bad.yml
vendored
Normal file
|
@ -0,0 +1,11 @@
|
||||||
|
global:
|
||||||
|
scrape_interval: 15s
|
||||||
|
|
||||||
|
scrape_config_files:
|
||||||
|
- scrape_config_files.good.yml
|
||||||
|
- scrape_config_files.good.yml
|
||||||
|
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: prometheus
|
||||||
|
static_configs:
|
||||||
|
- targets: ['localhost:8080']
|
2
config/testdata/scrape_config_files_only.good.yml
vendored
Normal file
2
config/testdata/scrape_config_files_only.good.yml
vendored
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
scrape_config_files:
|
||||||
|
- scrape_config_files.good.yml
|
7
config/testdata/scrape_configs/scrape_config_files1.good.yml
vendored
Normal file
7
config/testdata/scrape_configs/scrape_config_files1.good.yml
vendored
Normal file
|
@ -0,0 +1,7 @@
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: prometheus
|
||||||
|
static_configs:
|
||||||
|
- targets: ['localhost:8080']
|
||||||
|
tls_config:
|
||||||
|
cert_file: valid_cert_file
|
||||||
|
key_file: valid_key_file
|
9
config/testdata/scrape_configs/scrape_config_files2.good.yml
vendored
Normal file
9
config/testdata/scrape_configs/scrape_config_files2.good.yml
vendored
Normal file
|
@ -0,0 +1,9 @@
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: node
|
||||||
|
scrape_interval: 15s
|
||||||
|
tls_config:
|
||||||
|
cert_file: ../valid_cert_file
|
||||||
|
key_file: ../valid_key_file
|
||||||
|
vultr_sd_configs:
|
||||||
|
- authorization:
|
||||||
|
credentials: abcdef
|
|
@ -66,8 +66,9 @@ const (
|
||||||
|
|
||||||
// DefaultEC2SDConfig is the default EC2 SD configuration.
|
// DefaultEC2SDConfig is the default EC2 SD configuration.
|
||||||
var DefaultEC2SDConfig = EC2SDConfig{
|
var DefaultEC2SDConfig = EC2SDConfig{
|
||||||
Port: 80,
|
Port: 80,
|
||||||
RefreshInterval: model.Duration(60 * time.Second),
|
RefreshInterval: model.Duration(60 * time.Second),
|
||||||
|
HTTPClientConfig: config.DefaultHTTPClientConfig,
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
@ -91,6 +92,8 @@ type EC2SDConfig struct {
|
||||||
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
|
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
|
||||||
Port int `yaml:"port"`
|
Port int `yaml:"port"`
|
||||||
Filters []*EC2Filter `yaml:"filters"`
|
Filters []*EC2Filter `yaml:"filters"`
|
||||||
|
|
||||||
|
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Name returns the name of the EC2 Config.
|
// Name returns the name of the EC2 Config.
|
||||||
|
@ -161,7 +164,7 @@ func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger) *EC2Discovery {
|
||||||
return d
|
return d
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *EC2Discovery) ec2Client(ctx context.Context) (*ec2.EC2, error) {
|
func (d *EC2Discovery) ec2Client(context.Context) (*ec2.EC2, error) {
|
||||||
if d.ec2 != nil {
|
if d.ec2 != nil {
|
||||||
return d.ec2, nil
|
return d.ec2, nil
|
||||||
}
|
}
|
||||||
|
@ -171,11 +174,17 @@ func (d *EC2Discovery) ec2Client(ctx context.Context) (*ec2.EC2, error) {
|
||||||
creds = nil
|
creds = nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
client, err := config.NewClientFromConfig(d.cfg.HTTPClientConfig, "ec2_sd")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
sess, err := session.NewSessionWithOptions(session.Options{
|
sess, err := session.NewSessionWithOptions(session.Options{
|
||||||
Config: aws.Config{
|
Config: aws.Config{
|
||||||
Endpoint: &d.cfg.Endpoint,
|
Endpoint: &d.cfg.Endpoint,
|
||||||
Region: &d.cfg.Region,
|
Region: &d.cfg.Region,
|
||||||
Credentials: creds,
|
Credentials: creds,
|
||||||
|
HTTPClient: client,
|
||||||
},
|
},
|
||||||
Profile: d.cfg.Profile,
|
Profile: d.cfg.Profile,
|
||||||
})
|
})
|
||||||
|
|
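The EC2 change above inlines HTTPClientConfig into the SD config and threads the resulting client into the AWS session, so the standard HTTP client options become usable for EC2 discovery. A hedged sketch of what that could look like in a scrape config, assuming the common http_config field names (proxy_url, tls_config) apply here as they do for other discovery mechanisms; values are placeholders:

scrape_configs:
  - job_name: ec2
    ec2_sd_configs:
      - region: eu-west-1
        port: 9100
        proxy_url: 'http://proxy.internal:3128'
        tls_config:
          insecure_skip_verify: true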
|
@ -56,8 +56,9 @@ const (
|
||||||
|
|
||||||
// DefaultLightsailSDConfig is the default Lightsail SD configuration.
|
// DefaultLightsailSDConfig is the default Lightsail SD configuration.
|
||||||
var DefaultLightsailSDConfig = LightsailSDConfig{
|
var DefaultLightsailSDConfig = LightsailSDConfig{
|
||||||
Port: 80,
|
Port: 80,
|
||||||
RefreshInterval: model.Duration(60 * time.Second),
|
RefreshInterval: model.Duration(60 * time.Second),
|
||||||
|
HTTPClientConfig: config.DefaultHTTPClientConfig,
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
@ -74,6 +75,8 @@ type LightsailSDConfig struct {
|
||||||
RoleARN string `yaml:"role_arn,omitempty"`
|
RoleARN string `yaml:"role_arn,omitempty"`
|
||||||
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
|
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
|
||||||
Port int `yaml:"port"`
|
Port int `yaml:"port"`
|
||||||
|
|
||||||
|
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Name returns the name of the Lightsail Config.
|
// Name returns the name of the Lightsail Config.
|
||||||
|
@ -144,11 +147,17 @@ func (d *LightsailDiscovery) lightsailClient() (*lightsail.Lightsail, error) {
|
||||||
creds = nil
|
creds = nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
client, err := config.NewClientFromConfig(d.cfg.HTTPClientConfig, "lightsail_sd")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
sess, err := session.NewSessionWithOptions(session.Options{
|
sess, err := session.NewSessionWithOptions(session.Options{
|
||||||
Config: aws.Config{
|
Config: aws.Config{
|
||||||
Endpoint: &d.cfg.Endpoint,
|
Endpoint: &d.cfg.Endpoint,
|
||||||
Region: &d.cfg.Region,
|
Region: &d.cfg.Region,
|
||||||
Credentials: creds,
|
Credentials: creds,
|
||||||
|
HTTPClient: client,
|
||||||
},
|
},
|
||||||
Profile: d.cfg.Profile,
|
Profile: d.cfg.Profile,
|
||||||
})
|
})
|
||||||
|
|
|
@ -55,6 +55,7 @@ const (
|
||||||
azureLabelMachinePublicIP = azureLabel + "machine_public_ip"
|
azureLabelMachinePublicIP = azureLabel + "machine_public_ip"
|
||||||
azureLabelMachineTag = azureLabel + "machine_tag_"
|
azureLabelMachineTag = azureLabel + "machine_tag_"
|
||||||
azureLabelMachineScaleSet = azureLabel + "machine_scale_set"
|
azureLabelMachineScaleSet = azureLabel + "machine_scale_set"
|
||||||
|
azureLabelMachineSize = azureLabel + "machine_size"
|
||||||
|
|
||||||
authMethodOAuth = "OAuth"
|
authMethodOAuth = "OAuth"
|
||||||
authMethodManagedIdentity = "ManagedIdentity"
|
authMethodManagedIdentity = "ManagedIdentity"
|
||||||
|
@ -261,6 +262,7 @@ type virtualMachine struct {
|
||||||
ScaleSet string
|
ScaleSet string
|
||||||
Tags map[string]*string
|
Tags map[string]*string
|
||||||
NetworkInterfaces []string
|
NetworkInterfaces []string
|
||||||
|
Size string
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create a new azureResource object from an ID string.
|
// Create a new azureResource object from an ID string.
|
||||||
|
@ -343,6 +345,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
|
||||||
azureLabelMachineOSType: model.LabelValue(vm.OsType),
|
azureLabelMachineOSType: model.LabelValue(vm.OsType),
|
||||||
azureLabelMachineLocation: model.LabelValue(vm.Location),
|
azureLabelMachineLocation: model.LabelValue(vm.Location),
|
||||||
azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroup),
|
azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroup),
|
||||||
|
azureLabelMachineSize: model.LabelValue(vm.Size),
|
||||||
}
|
}
|
||||||
|
|
||||||
if vm.ScaleSet != "" {
|
if vm.ScaleSet != "" {
|
||||||
|
@ -514,6 +517,7 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine {
|
||||||
tags := map[string]*string{}
|
tags := map[string]*string{}
|
||||||
networkInterfaces := []string{}
|
networkInterfaces := []string{}
|
||||||
var computerName string
|
var computerName string
|
||||||
|
var size string
|
||||||
|
|
||||||
if vm.Tags != nil {
|
if vm.Tags != nil {
|
||||||
tags = vm.Tags
|
tags = vm.Tags
|
||||||
|
@ -525,10 +529,13 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if vm.VirtualMachineProperties != nil &&
|
if vm.VirtualMachineProperties != nil {
|
||||||
vm.VirtualMachineProperties.OsProfile != nil &&
|
if vm.VirtualMachineProperties.OsProfile != nil && vm.VirtualMachineProperties.OsProfile.ComputerName != nil {
|
||||||
vm.VirtualMachineProperties.OsProfile.ComputerName != nil {
|
computerName = *(vm.VirtualMachineProperties.OsProfile.ComputerName)
|
||||||
computerName = *(vm.VirtualMachineProperties.OsProfile.ComputerName)
|
}
|
||||||
|
if vm.VirtualMachineProperties.HardwareProfile != nil {
|
||||||
|
size = string(vm.VirtualMachineProperties.HardwareProfile.VMSize)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return virtualMachine{
|
return virtualMachine{
|
||||||
|
@ -541,6 +548,7 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine {
|
||||||
ScaleSet: "",
|
ScaleSet: "",
|
||||||
Tags: tags,
|
Tags: tags,
|
||||||
NetworkInterfaces: networkInterfaces,
|
NetworkInterfaces: networkInterfaces,
|
||||||
|
Size: size,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -549,6 +557,7 @@ func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName strin
|
||||||
tags := map[string]*string{}
|
tags := map[string]*string{}
|
||||||
networkInterfaces := []string{}
|
networkInterfaces := []string{}
|
||||||
var computerName string
|
var computerName string
|
||||||
|
var size string
|
||||||
|
|
||||||
if vm.Tags != nil {
|
if vm.Tags != nil {
|
||||||
tags = vm.Tags
|
tags = vm.Tags
|
||||||
|
@ -560,8 +569,13 @@ func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName strin
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if vm.VirtualMachineScaleSetVMProperties != nil && vm.VirtualMachineScaleSetVMProperties.OsProfile != nil {
|
if vm.VirtualMachineScaleSetVMProperties != nil {
|
||||||
computerName = *(vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName)
|
if vm.VirtualMachineScaleSetVMProperties.OsProfile != nil && vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName != nil {
|
||||||
|
computerName = *(vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName)
|
||||||
|
}
|
||||||
|
if vm.VirtualMachineScaleSetVMProperties.HardwareProfile != nil {
|
||||||
|
size = string(vm.VirtualMachineScaleSetVMProperties.HardwareProfile.VMSize)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return virtualMachine{
|
return virtualMachine{
|
||||||
|
@ -574,6 +588,7 @@ func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName strin
|
||||||
ScaleSet: scaleSetName,
|
ScaleSet: scaleSetName,
|
||||||
Tags: tags,
|
Tags: tags,
|
||||||
NetworkInterfaces: networkInterfaces,
|
NetworkInterfaces: networkInterfaces,
|
||||||
|
Size: size,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
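With azureLabelMachineSize added above, each discovered VM also carries its size (taken from HardwareProfile.VMSize) as a meta label. Assuming the azureLabel prefix resolves to __meta_azure_, a relabel rule that keeps the size on stored series might look like this sketch; the subscription ID and authentication method are placeholders:

scrape_configs:
  - job_name: azure
    azure_sd_configs:
      - subscription_id: 11111111-2222-3333-4444-555555555555
        authentication_method: ManagedIdentity
    relabel_configs:
      - source_labels: [__meta_azure_machine_size]
        target_label: machine_size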
|
@ -28,6 +28,7 @@ func TestMain(m *testing.M) {
|
||||||
func TestMapFromVMWithEmptyTags(t *testing.T) {
|
func TestMapFromVMWithEmptyTags(t *testing.T) {
|
||||||
id := "test"
|
id := "test"
|
||||||
name := "name"
|
name := "name"
|
||||||
|
size := "size"
|
||||||
vmType := "type"
|
vmType := "type"
|
||||||
location := "westeurope"
|
location := "westeurope"
|
||||||
computerName := "computer_name"
|
computerName := "computer_name"
|
||||||
|
@ -44,6 +45,9 @@ func TestMapFromVMWithEmptyTags(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
NetworkProfile: &networkProfile,
|
NetworkProfile: &networkProfile,
|
||||||
|
HardwareProfile: &compute.HardwareProfile{
|
||||||
|
VMSize: compute.VirtualMachineSizeTypes(size),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
testVM := compute.VirtualMachine{
|
testVM := compute.VirtualMachine{
|
||||||
|
@ -64,6 +68,7 @@ func TestMapFromVMWithEmptyTags(t *testing.T) {
|
||||||
OsType: "Linux",
|
OsType: "Linux",
|
||||||
Tags: map[string]*string{},
|
Tags: map[string]*string{},
|
||||||
NetworkInterfaces: []string{},
|
NetworkInterfaces: []string{},
|
||||||
|
Size: size,
|
||||||
}
|
}
|
||||||
|
|
||||||
actualVM := mapFromVM(testVM)
|
actualVM := mapFromVM(testVM)
|
||||||
|
@ -74,6 +79,7 @@ func TestMapFromVMWithEmptyTags(t *testing.T) {
|
||||||
func TestMapFromVMWithTags(t *testing.T) {
|
func TestMapFromVMWithTags(t *testing.T) {
|
||||||
id := "test"
|
id := "test"
|
||||||
name := "name"
|
name := "name"
|
||||||
|
size := "size"
|
||||||
vmType := "type"
|
vmType := "type"
|
||||||
location := "westeurope"
|
location := "westeurope"
|
||||||
computerName := "computer_name"
|
computerName := "computer_name"
|
||||||
|
@ -93,6 +99,9 @@ func TestMapFromVMWithTags(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
NetworkProfile: &networkProfile,
|
NetworkProfile: &networkProfile,
|
||||||
|
HardwareProfile: &compute.HardwareProfile{
|
||||||
|
VMSize: compute.VirtualMachineSizeTypes(size),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
testVM := compute.VirtualMachine{
|
testVM := compute.VirtualMachine{
|
||||||
|
@ -113,6 +122,7 @@ func TestMapFromVMWithTags(t *testing.T) {
|
||||||
OsType: "Linux",
|
OsType: "Linux",
|
||||||
Tags: tags,
|
Tags: tags,
|
||||||
NetworkInterfaces: []string{},
|
NetworkInterfaces: []string{},
|
||||||
|
Size: size,
|
||||||
}
|
}
|
||||||
|
|
||||||
actualVM := mapFromVM(testVM)
|
actualVM := mapFromVM(testVM)
|
||||||
|
@ -123,6 +133,7 @@ func TestMapFromVMWithTags(t *testing.T) {
|
||||||
func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) {
|
func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) {
|
||||||
id := "test"
|
id := "test"
|
||||||
name := "name"
|
name := "name"
|
||||||
|
size := "size"
|
||||||
vmType := "type"
|
vmType := "type"
|
||||||
location := "westeurope"
|
location := "westeurope"
|
||||||
computerName := "computer_name"
|
computerName := "computer_name"
|
||||||
|
@ -139,6 +150,9 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
NetworkProfile: &networkProfile,
|
NetworkProfile: &networkProfile,
|
||||||
|
HardwareProfile: &compute.HardwareProfile{
|
||||||
|
VMSize: compute.VirtualMachineSizeTypes(size),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
testVM := compute.VirtualMachineScaleSetVM{
|
testVM := compute.VirtualMachineScaleSetVM{
|
||||||
|
@ -161,6 +175,7 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) {
|
||||||
Tags: map[string]*string{},
|
Tags: map[string]*string{},
|
||||||
NetworkInterfaces: []string{},
|
NetworkInterfaces: []string{},
|
||||||
ScaleSet: scaleSet,
|
ScaleSet: scaleSet,
|
||||||
|
Size: size,
|
||||||
}
|
}
|
||||||
|
|
||||||
actualVM := mapFromVMScaleSetVM(testVM, scaleSet)
|
actualVM := mapFromVMScaleSetVM(testVM, scaleSet)
|
||||||
|
@ -171,6 +186,7 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) {
|
||||||
func TestMapFromVMScaleSetVMWithTags(t *testing.T) {
|
func TestMapFromVMScaleSetVMWithTags(t *testing.T) {
|
||||||
id := "test"
|
id := "test"
|
||||||
name := "name"
|
name := "name"
|
||||||
|
size := "size"
|
||||||
vmType := "type"
|
vmType := "type"
|
||||||
location := "westeurope"
|
location := "westeurope"
|
||||||
computerName := "computer_name"
|
computerName := "computer_name"
|
||||||
|
@ -190,6 +206,9 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
NetworkProfile: &networkProfile,
|
NetworkProfile: &networkProfile,
|
||||||
|
HardwareProfile: &compute.HardwareProfile{
|
||||||
|
VMSize: compute.VirtualMachineSizeTypes(size),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
testVM := compute.VirtualMachineScaleSetVM{
|
testVM := compute.VirtualMachineScaleSetVM{
|
||||||
|
@ -212,6 +231,7 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) {
|
||||||
Tags: tags,
|
Tags: tags,
|
||||||
NetworkInterfaces: []string{},
|
NetworkInterfaces: []string{},
|
||||||
ScaleSet: scaleSet,
|
ScaleSet: scaleSet,
|
||||||
|
Size: size,
|
||||||
}
|
}
|
||||||
|
|
||||||
actualVM := mapFromVMScaleSetVM(testVM, scaleSet)
|
actualVM := mapFromVMScaleSetVM(testVM, scaleSet)
|
||||||
|
|
|
@ -60,6 +60,8 @@ const (
|
||||||
datacenterLabel = model.MetaLabelPrefix + "consul_dc"
|
datacenterLabel = model.MetaLabelPrefix + "consul_dc"
|
||||||
// namespaceLabel is the name of the label containing the namespace (Consul Enterprise only).
|
// namespaceLabel is the name of the label containing the namespace (Consul Enterprise only).
|
||||||
namespaceLabel = model.MetaLabelPrefix + "consul_namespace"
|
namespaceLabel = model.MetaLabelPrefix + "consul_namespace"
|
||||||
|
// partitionLabel is the name of the label containing the Admin Partition (Consul Enterprise only).
|
||||||
|
partitionLabel = model.MetaLabelPrefix + "consul_partition"
|
||||||
// taggedAddressesLabel is the prefix for the labels mapping to a target's tagged addresses.
|
// taggedAddressesLabel is the prefix for the labels mapping to a target's tagged addresses.
|
||||||
taggedAddressesLabel = model.MetaLabelPrefix + "consul_tagged_address_"
|
taggedAddressesLabel = model.MetaLabelPrefix + "consul_tagged_address_"
|
||||||
// serviceIDLabel is the name of the label containing the service ID.
|
// serviceIDLabel is the name of the label containing the service ID.
|
||||||
|
@ -109,9 +111,11 @@ func init() {
|
||||||
// SDConfig is the configuration for Consul service discovery.
|
// SDConfig is the configuration for Consul service discovery.
|
||||||
type SDConfig struct {
|
type SDConfig struct {
|
||||||
Server string `yaml:"server,omitempty"`
|
Server string `yaml:"server,omitempty"`
|
||||||
|
PathPrefix string `yaml:"path_prefix,omitempty"`
|
||||||
Token config.Secret `yaml:"token,omitempty"`
|
Token config.Secret `yaml:"token,omitempty"`
|
||||||
Datacenter string `yaml:"datacenter,omitempty"`
|
Datacenter string `yaml:"datacenter,omitempty"`
|
||||||
Namespace string `yaml:"namespace,omitempty"`
|
Namespace string `yaml:"namespace,omitempty"`
|
||||||
|
Partition string `yaml:"partition,omitempty"`
|
||||||
TagSeparator string `yaml:"tag_separator,omitempty"`
|
TagSeparator string `yaml:"tag_separator,omitempty"`
|
||||||
Scheme string `yaml:"scheme,omitempty"`
|
Scheme string `yaml:"scheme,omitempty"`
|
||||||
Username string `yaml:"username,omitempty"`
|
Username string `yaml:"username,omitempty"`
|
||||||
|
@ -183,6 +187,7 @@ type Discovery struct {
|
||||||
client *consul.Client
|
client *consul.Client
|
||||||
clientDatacenter string
|
clientDatacenter string
|
||||||
clientNamespace string
|
clientNamespace string
|
||||||
|
clientPartition string
|
||||||
tagSeparator string
|
tagSeparator string
|
||||||
watchedServices []string // Set of services which will be discovered.
|
watchedServices []string // Set of services which will be discovered.
|
||||||
watchedTags []string // Tags used to filter instances of a service.
|
watchedTags []string // Tags used to filter instances of a service.
|
||||||
|
@ -207,9 +212,11 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
|
||||||
|
|
||||||
clientConf := &consul.Config{
|
clientConf := &consul.Config{
|
||||||
Address: conf.Server,
|
Address: conf.Server,
|
||||||
|
PathPrefix: conf.PathPrefix,
|
||||||
Scheme: conf.Scheme,
|
Scheme: conf.Scheme,
|
||||||
Datacenter: conf.Datacenter,
|
Datacenter: conf.Datacenter,
|
||||||
Namespace: conf.Namespace,
|
Namespace: conf.Namespace,
|
||||||
|
Partition: conf.Partition,
|
||||||
Token: string(conf.Token),
|
Token: string(conf.Token),
|
||||||
HttpClient: wrapper,
|
HttpClient: wrapper,
|
||||||
}
|
}
|
||||||
|
@ -227,6 +234,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
|
||||||
refreshInterval: time.Duration(conf.RefreshInterval),
|
refreshInterval: time.Duration(conf.RefreshInterval),
|
||||||
clientDatacenter: conf.Datacenter,
|
clientDatacenter: conf.Datacenter,
|
||||||
clientNamespace: conf.Namespace,
|
clientNamespace: conf.Namespace,
|
||||||
|
clientPartition: conf.Partition,
|
||||||
finalizer: wrapper.CloseIdleConnections,
|
finalizer: wrapper.CloseIdleConnections,
|
||||||
logger: logger,
|
logger: logger,
|
||||||
}
|
}
|
||||||
|
@ -547,6 +555,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
|
||||||
addressLabel: model.LabelValue(serviceNode.Node.Address),
|
addressLabel: model.LabelValue(serviceNode.Node.Address),
|
||||||
nodeLabel: model.LabelValue(serviceNode.Node.Node),
|
nodeLabel: model.LabelValue(serviceNode.Node.Node),
|
||||||
namespaceLabel: model.LabelValue(serviceNode.Service.Namespace),
|
namespaceLabel: model.LabelValue(serviceNode.Service.Namespace),
|
||||||
|
partitionLabel: model.LabelValue(serviceNode.Service.Partition),
|
||||||
tagsLabel: model.LabelValue(tags),
|
tagsLabel: model.LabelValue(tags),
|
||||||
serviceAddressLabel: model.LabelValue(serviceNode.Service.Address),
|
serviceAddressLabel: model.LabelValue(serviceNode.Service.Address),
|
||||||
servicePortLabel: model.LabelValue(strconv.Itoa(serviceNode.Service.Port)),
|
servicePortLabel: model.LabelValue(strconv.Itoa(serviceNode.Service.Port)),
|
||||||
|
|
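The Consul changes above add two pass-through options, path_prefix (already exercised by the `path_prefix: /consul` line added to conf.good.yml) and partition, plus a __meta_consul_partition label derived from the service node. A sketch of a consul_sd_configs entry using both, with placeholder values:

scrape_configs:
  - job_name: consul
    consul_sd_configs:
      - server: 'localhost:8500'
        path_prefix: /consul
        partition: team-a
        services: ['nginx']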
|
@ -365,7 +365,7 @@ func TestGetDatacenterShouldReturnError(t *testing.T) {
|
||||||
{
|
{
|
||||||
// Define a handler that will return status 500.
|
// Define a handler that will return status 500.
|
||||||
handler: func(w http.ResponseWriter, r *http.Request) {
|
handler: func(w http.ResponseWriter, r *http.Request) {
|
||||||
w.WriteHeader(500)
|
w.WriteHeader(http.StatusInternalServerError)
|
||||||
},
|
},
|
||||||
errMessage: "Unexpected response code: 500 ()",
|
errMessage: "Unexpected response code: 500 ()",
|
||||||
},
|
},
|
||||||
|
|
|
@ -285,21 +285,22 @@ func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Ms
|
||||||
for _, lname := range conf.NameList(name) {
|
for _, lname := range conf.NameList(name) {
|
||||||
response, err := lookupFromAnyServer(lname, qtype, conf, logger)
|
response, err := lookupFromAnyServer(lname, qtype, conf, logger)
|
||||||
|
|
||||||
if err != nil {
|
switch {
|
||||||
|
case err != nil:
|
||||||
// We can't go home yet, because a later name
|
// We can't go home yet, because a later name
|
||||||
// may give us a valid, successful answer. However
|
// may give us a valid, successful answer. However
|
||||||
// we can no longer say "this name definitely doesn't
|
// we can no longer say "this name definitely doesn't
|
||||||
// exist", because we did not get that answer for
|
// exist", because we did not get that answer for
|
||||||
// at least one name.
|
// at least one name.
|
||||||
allResponsesValid = false
|
allResponsesValid = false
|
||||||
} else if response.Rcode == dns.RcodeSuccess {
|
case response.Rcode == dns.RcodeSuccess:
|
||||||
// Outcome 1: GOLD!
|
// Outcome 1: GOLD!
|
||||||
return response, nil
|
return response, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if allResponsesValid {
|
if allResponsesValid {
|
||||||
// Outcome 2: everyone says NXDOMAIN, that's good enough for me
|
// Outcome 2: everyone says NXDOMAIN, that's good enough for me.
|
||||||
return &dns.Msg{}, nil
|
return &dns.Msg{}, nil
|
||||||
}
|
}
|
||||||
// Outcome 3: boned.
|
// Outcome 3: boned.
|
||||||
|
|
|
@ -39,18 +39,23 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
fileSDReadErrorsCount = prometheus.NewCounter(
|
||||||
|
prometheus.CounterOpts{
|
||||||
|
Name: "prometheus_sd_file_read_errors_total",
|
||||||
|
Help: "The number of File-SD read errors.",
|
||||||
|
})
|
||||||
fileSDScanDuration = prometheus.NewSummary(
|
fileSDScanDuration = prometheus.NewSummary(
|
||||||
prometheus.SummaryOpts{
|
prometheus.SummaryOpts{
|
||||||
Name: "prometheus_sd_file_scan_duration_seconds",
|
Name: "prometheus_sd_file_scan_duration_seconds",
|
||||||
Help: "The duration of the File-SD scan in seconds.",
|
Help: "The duration of the File-SD scan in seconds.",
|
||||||
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
||||||
})
|
})
|
||||||
fileSDReadErrorsCount = prometheus.NewCounter(
|
fileSDTimeStamp = NewTimestampCollector()
|
||||||
|
fileWatcherErrorsCount = prometheus.NewCounter(
|
||||||
prometheus.CounterOpts{
|
prometheus.CounterOpts{
|
||||||
Name: "prometheus_sd_file_read_errors_total",
|
Name: "prometheus_sd_file_watcher_errors_total",
|
||||||
Help: "The number of File-SD read errors.",
|
Help: "The number of File-SD errors caused by filesystem watch failures.",
|
||||||
})
|
})
|
||||||
fileSDTimeStamp = NewTimestampCollector()
|
|
||||||
|
|
||||||
patFileSDName = regexp.MustCompile(`^[^*]*(\*[^/]*)?\.(json|yml|yaml|JSON|YML|YAML)$`)
|
patFileSDName = regexp.MustCompile(`^[^*]*(\*[^/]*)?\.(json|yml|yaml|JSON|YML|YAML)$`)
|
||||||
|
|
||||||
|
@ -62,7 +67,7 @@ var (
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
discovery.RegisterConfig(&SDConfig{})
|
discovery.RegisterConfig(&SDConfig{})
|
||||||
prometheus.MustRegister(fileSDScanDuration, fileSDReadErrorsCount, fileSDTimeStamp)
|
prometheus.MustRegister(fileSDReadErrorsCount, fileSDScanDuration, fileSDTimeStamp, fileWatcherErrorsCount)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SDConfig is the configuration for file based discovery.
|
// SDConfig is the configuration for file based discovery.
|
||||||
|
@ -221,8 +226,8 @@ func (d *Discovery) watchFiles() {
|
||||||
panic("no watcher configured")
|
panic("no watcher configured")
|
||||||
}
|
}
|
||||||
for _, p := range d.paths {
|
for _, p := range d.paths {
|
||||||
if idx := strings.LastIndex(p, "/"); idx > -1 {
|
if dir, _ := filepath.Split(p); dir != "" {
|
||||||
p = p[:idx]
|
p = dir
|
||||||
} else {
|
} else {
|
||||||
p = "./"
|
p = "./"
|
||||||
}
|
}
|
||||||
|
@ -237,6 +242,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
|
||||||
watcher, err := fsnotify.NewWatcher()
|
watcher, err := fsnotify.NewWatcher()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
level.Error(d.logger).Log("msg", "Error adding file watcher", "err", err)
|
level.Error(d.logger).Log("msg", "Error adding file watcher", "err", err)
|
||||||
|
fileWatcherErrorsCount.Inc()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
d.watcher = watcher
|
d.watcher = watcher
|
||||||
|
|
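The file-SD change above splits the metrics into read errors and a new prometheus_sd_file_watcher_errors_total counter, incremented when the fsnotify watcher cannot be created, and now derives the watched directory with filepath.Split instead of slicing on '/'. For reference, a file_sd_configs stanza whose glob would exercise that watcher, as a sketch with illustrative paths:

scrape_configs:
  - job_name: file-sd
    file_sd_configs:
      - files:
          - 'targets/*.yml'
        refresh_interval: 5m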
|
@ -59,7 +59,7 @@ type hcloudDiscovery struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets.
|
// newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets.
|
||||||
func newHcloudDiscovery(conf *SDConfig, logger log.Logger) (*hcloudDiscovery, error) {
|
func newHcloudDiscovery(conf *SDConfig, _ log.Logger) (*hcloudDiscovery, error) {
|
||||||
d := &hcloudDiscovery{
|
d := &hcloudDiscovery{
|
||||||
port: conf.Port,
|
port: conf.Port,
|
||||||
}
|
}
|
||||||
|
|
|
@ -51,7 +51,7 @@ type robotDiscovery struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// newRobotDiscovery returns a new robotDiscovery which periodically refreshes its targets.
|
// newRobotDiscovery returns a new robotDiscovery which periodically refreshes its targets.
|
||||||
func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, error) {
|
func newRobotDiscovery(conf *SDConfig, _ log.Logger) (*robotDiscovery, error) {
|
||||||
d := &robotDiscovery{
|
d := &robotDiscovery{
|
||||||
port: conf.Port,
|
port: conf.Port,
|
||||||
endpoint: conf.robotEndpoint,
|
endpoint: conf.robotEndpoint,
|
||||||
|
@ -69,7 +69,7 @@ func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, erro
|
||||||
return d, nil
|
return d, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
|
func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
|
||||||
req, err := http.NewRequest("GET", d.endpoint+"/server", nil)
|
req, err := http.NewRequest("GET", d.endpoint+"/server", nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
|
|
@ -33,6 +33,7 @@ import (
|
||||||
_ "github.com/prometheus/prometheus/discovery/moby" // register moby
|
_ "github.com/prometheus/prometheus/discovery/moby" // register moby
|
||||||
_ "github.com/prometheus/prometheus/discovery/nomad" // register nomad
|
_ "github.com/prometheus/prometheus/discovery/nomad" // register nomad
|
||||||
_ "github.com/prometheus/prometheus/discovery/openstack" // register openstack
|
_ "github.com/prometheus/prometheus/discovery/openstack" // register openstack
|
||||||
|
_ "github.com/prometheus/prometheus/discovery/ovhcloud" // register ovhcloud
|
||||||
_ "github.com/prometheus/prometheus/discovery/puppetdb" // register puppetdb
|
_ "github.com/prometheus/prometheus/discovery/puppetdb" // register puppetdb
|
||||||
_ "github.com/prometheus/prometheus/discovery/scaleway" // register scaleway
|
_ "github.com/prometheus/prometheus/discovery/scaleway" // register scaleway
|
||||||
_ "github.com/prometheus/prometheus/discovery/triton" // register triton
|
_ "github.com/prometheus/prometheus/discovery/triton" // register triton
|
||||||
|
|
|
@ -60,7 +60,7 @@ type serverDiscovery struct {
|
||||||
datacenterID string
|
datacenterID string
|
||||||
}
|
}
|
||||||
|
|
||||||
func newServerDiscovery(conf *SDConfig, logger log.Logger) (*serverDiscovery, error) {
|
func newServerDiscovery(conf *SDConfig, _ log.Logger) (*serverDiscovery, error) {
|
||||||
d := &serverDiscovery{
|
d := &serverDiscovery{
|
||||||
port: conf.Port,
|
port: conf.Port,
|
||||||
datacenterID: conf.DatacenterID,
|
datacenterID: conf.DatacenterID,
|
||||||
|
|
|
@ -122,11 +122,11 @@ func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code, method, host string) {
|
func (clientGoRequestMetricAdapter) Increment(_ context.Context, code, _, _ string) {
|
||||||
clientGoRequestResultMetricVec.WithLabelValues(code).Inc()
|
clientGoRequestResultMetricVec.WithLabelValues(code).Inc()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (clientGoRequestMetricAdapter) Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) {
|
func (clientGoRequestMetricAdapter) Observe(_ context.Context, _ string, u url.URL, latency time.Duration) {
|
||||||
clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds())
|
clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -169,7 +169,7 @@ func (f *clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetr
|
||||||
return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name)
|
return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric {
|
func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(string) workqueue.CounterMetric {
|
||||||
// Retries are not used so the metric is omitted.
|
// Retries are not used so the metric is omitted.
|
||||||
return noopMetric{}
|
return noopMetric{}
|
||||||
}
|
}
|
||||||
|
|
|
@ -11,6 +11,7 @@
|
||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
|
// nolint:revive // Many legitimately empty blocks in this file.
|
||||||
package kubernetes
|
package kubernetes
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
@ -72,7 +73,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
|
||||||
queue: workqueue.NewNamed("endpoints"),
|
queue: workqueue.NewNamed("endpoints"),
|
||||||
}
|
}
|
||||||
|
|
||||||
e.endpointsInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
_, err := e.endpointsInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||||
AddFunc: func(o interface{}) {
|
AddFunc: func(o interface{}) {
|
||||||
epAddCount.Inc()
|
epAddCount.Inc()
|
||||||
e.enqueue(o)
|
e.enqueue(o)
|
||||||
|
@ -86,6 +87,9 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
|
||||||
e.enqueue(o)
|
e.enqueue(o)
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
if err != nil {
|
||||||
|
level.Error(l).Log("msg", "Error adding endpoints event handler.", "err", err)
|
||||||
|
}
|
||||||
|
|
||||||
serviceUpdate := func(o interface{}) {
|
serviceUpdate := func(o interface{}) {
|
||||||
svc, err := convertToService(o)
|
svc, err := convertToService(o)
|
||||||
|
@ -106,7 +110,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
|
||||||
level.Error(e.logger).Log("msg", "retrieving endpoints failed", "err", err)
|
level.Error(e.logger).Log("msg", "retrieving endpoints failed", "err", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
_, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||||
// TODO(fabxc): potentially remove add and delete event handlers. Those should
|
// TODO(fabxc): potentially remove add and delete event handlers. Those should
|
||||||
// be triggered via the endpoint handlers already.
|
// be triggered via the endpoint handlers already.
|
||||||
AddFunc: func(o interface{}) {
|
AddFunc: func(o interface{}) {
|
||||||
|
@ -122,8 +126,11 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
|
||||||
serviceUpdate(o)
|
serviceUpdate(o)
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
if err != nil {
|
||||||
|
level.Error(l).Log("msg", "Error adding services event handler.", "err", err)
|
||||||
|
}
|
||||||
if e.withNodeMetadata {
|
if e.withNodeMetadata {
|
||||||
e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
_, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||||
AddFunc: func(o interface{}) {
|
AddFunc: func(o interface{}) {
|
||||||
node := o.(*apiv1.Node)
|
node := o.(*apiv1.Node)
|
||||||
e.enqueueNode(node.Name)
|
e.enqueueNode(node.Name)
|
||||||
|
@ -137,6 +144,9 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
|
||||||
e.enqueueNode(node.Name)
|
e.enqueueNode(node.Name)
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
if err != nil {
|
||||||
|
level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return e
|
return e
|
||||||
|
@ -295,7 +305,11 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
|
||||||
}
|
}
|
||||||
|
|
||||||
if e.withNodeMetadata {
|
if e.withNodeMetadata {
|
||||||
target = addNodeLabels(target, e.nodeInf, e.logger, addr.NodeName)
|
if addr.NodeName != nil {
|
||||||
|
target = addNodeLabels(target, e.nodeInf, e.logger, addr.NodeName)
|
||||||
|
} else if addr.TargetRef != nil && addr.TargetRef.Kind == "Node" {
|
||||||
|
target = addNodeLabels(target, e.nodeInf, e.logger, &addr.TargetRef.Name)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pod := e.resolvePodRef(addr.TargetRef)
|
pod := e.resolvePodRef(addr.TargetRef)
|
||||||
|
@ -375,18 +389,21 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
|
// PodIP can be empty when a pod is starting or has been evicted.
|
||||||
ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)
|
if len(pe.pod.Status.PodIP) != 0 {
|
||||||
|
a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
|
||||||
|
ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)
|
||||||
|
|
||||||
target := model.LabelSet{
|
target := model.LabelSet{
|
||||||
model.AddressLabel: lv(a),
|
model.AddressLabel: lv(a),
|
||||||
podContainerNameLabel: lv(c.Name),
|
podContainerNameLabel: lv(c.Name),
|
||||||
podContainerImageLabel: lv(c.Image),
|
podContainerImageLabel: lv(c.Image),
|
||||||
podContainerPortNameLabel: lv(cport.Name),
|
podContainerPortNameLabel: lv(cport.Name),
|
||||||
podContainerPortNumberLabel: lv(ports),
|
podContainerPortNumberLabel: lv(ports),
|
||||||
podContainerPortProtocolLabel: lv(string(cport.Protocol)),
|
podContainerPortProtocolLabel: lv(string(cport.Protocol)),
|
||||||
|
}
|
||||||
|
tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
|
||||||
}
|
}
|
||||||
tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -456,5 +473,6 @@ func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger log.L
|
||||||
nodeLabelset[model.LabelName(nodeLabelPrefix+ln)] = lv(v)
|
nodeLabelset[model.LabelName(nodeLabelPrefix+ln)] = lv(v)
|
||||||
nodeLabelset[model.LabelName(nodeLabelPresentPrefix+ln)] = presentValue
|
nodeLabelset[model.LabelName(nodeLabelPresentPrefix+ln)] = presentValue
|
||||||
}
|
}
|
||||||
|
|
||||||
return tg.Merge(nodeLabelset)
|
return tg.Merge(nodeLabelset)
|
||||||
}
|
}
|
||||||
|
|
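The endpoints changes above guard against an empty PodIP and resolve node metadata through TargetRef when an address targets a Node, which is what the new "barbaz" fixtures below exercise. The tests enable this via AttachMetadataConfig{Node: true}; the equivalent in a scrape configuration is the attach_metadata block, sketched here with an illustrative job name:

scrape_configs:
  - job_name: kubernetes-endpoints
    kubernetes_sd_configs:
      - role: endpoints
        attach_metadata:
          node: true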
|
@ -69,6 +69,24 @@ func makeEndpoints() *v1.Endpoints {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
Addresses: []v1.EndpointAddress{
|
||||||
|
{
|
||||||
|
IP: "6.7.8.9",
|
||||||
|
TargetRef: &v1.ObjectReference{
|
||||||
|
Kind: "Node",
|
||||||
|
Name: "barbaz",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Ports: []v1.EndpointPort{
|
||||||
|
{
|
||||||
|
Name: "testport",
|
||||||
|
Port: 9002,
|
||||||
|
Protocol: v1.ProtocolTCP,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -106,6 +124,14 @@ func TestEndpointsDiscoveryBeforeRun(t *testing.T) {
 "__meta_kubernetes_endpoint_port_protocol": "TCP",
 "__meta_kubernetes_endpoint_ready": "false",
 },
+{
+"__address__": "6.7.8.9:9002",
+"__meta_kubernetes_endpoint_address_target_kind": "Node",
+"__meta_kubernetes_endpoint_address_target_name": "barbaz",
+"__meta_kubernetes_endpoint_port_name": "testport",
+"__meta_kubernetes_endpoint_port_protocol": "TCP",
+"__meta_kubernetes_endpoint_ready": "true",
+},
 },
 Labels: model.LabelSet{
 "__meta_kubernetes_namespace": "default",
@@ -398,6 +424,14 @@ func TestEndpointsDiscoveryWithService(t *testing.T) {
 "__meta_kubernetes_endpoint_port_protocol": "TCP",
 "__meta_kubernetes_endpoint_ready": "false",
 },
+{
+"__address__": "6.7.8.9:9002",
+"__meta_kubernetes_endpoint_address_target_kind": "Node",
+"__meta_kubernetes_endpoint_address_target_name": "barbaz",
+"__meta_kubernetes_endpoint_port_name": "testport",
+"__meta_kubernetes_endpoint_port_protocol": "TCP",
+"__meta_kubernetes_endpoint_ready": "true",
+},
 },
 Labels: model.LabelSet{
 "__meta_kubernetes_namespace": "default",
@@ -466,6 +500,14 @@ func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) {
 "__meta_kubernetes_endpoint_port_protocol": "TCP",
 "__meta_kubernetes_endpoint_ready": "false",
 },
+{
+"__address__": "6.7.8.9:9002",
+"__meta_kubernetes_endpoint_address_target_kind": "Node",
+"__meta_kubernetes_endpoint_address_target_name": "barbaz",
+"__meta_kubernetes_endpoint_port_name": "testport",
+"__meta_kubernetes_endpoint_port_protocol": "TCP",
+"__meta_kubernetes_endpoint_ready": "true",
+},
 },
 Labels: model.LabelSet{
 "__meta_kubernetes_namespace": "default",
@@ -484,8 +526,10 @@ func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) {
 
 func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
 metadataConfig := AttachMetadataConfig{Node: true}
-nodeLabels := map[string]string{"az": "us-east1"}
-node := makeNode("foobar", "", "", nodeLabels, nil)
+nodeLabels1 := map[string]string{"az": "us-east1"}
+nodeLabels2 := map[string]string{"az": "us-west2"}
+node1 := makeNode("foobar", "", "", nodeLabels1, nil)
+node2 := makeNode("barbaz", "", "", nodeLabels2, nil)
 svc := &v1.Service{
 ObjectMeta: metav1.ObjectMeta{
 Name: "testendpoints",
@@ -495,7 +539,7 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
 },
 },
 }
-n, _ := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), svc, node)
+n, _ := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), svc, node1, node2)
 
 k8sDiscoveryTest{
 discovery: n,
@@ -526,6 +570,17 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
 "__meta_kubernetes_endpoint_port_protocol": "TCP",
 "__meta_kubernetes_endpoint_ready": "false",
 },
+{
+"__address__": "6.7.8.9:9002",
+"__meta_kubernetes_endpoint_address_target_kind": "Node",
+"__meta_kubernetes_endpoint_address_target_name": "barbaz",
+"__meta_kubernetes_endpoint_port_name": "testport",
+"__meta_kubernetes_endpoint_port_protocol": "TCP",
+"__meta_kubernetes_endpoint_ready": "true",
+"__meta_kubernetes_node_label_az": "us-west2",
+"__meta_kubernetes_node_labelpresent_az": "true",
+"__meta_kubernetes_node_name": "barbaz",
+},
 },
 Labels: model.LabelSet{
 "__meta_kubernetes_namespace": "default",
@@ -541,8 +596,10 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
 }
 
 func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
-nodeLabels := map[string]string{"az": "us-east1"}
-nodes := makeNode("foobar", "", "", nodeLabels, nil)
+nodeLabels1 := map[string]string{"az": "us-east1"}
+nodeLabels2 := map[string]string{"az": "us-west2"}
+node1 := makeNode("foobar", "", "", nodeLabels1, nil)
+node2 := makeNode("barbaz", "", "", nodeLabels2, nil)
 metadataConfig := AttachMetadataConfig{Node: true}
 svc := &v1.Service{
 ObjectMeta: metav1.ObjectMeta{
@@ -553,13 +610,13 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 },
 },
 }
-n, c := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), nodes, svc)
+n, c := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), node1, node2, svc)
 
 k8sDiscoveryTest{
 discovery: n,
 afterStart: func() {
-nodes.Labels["az"] = "eu-central1"
-c.CoreV1().Nodes().Update(context.Background(), nodes, metav1.UpdateOptions{})
+node1.Labels["az"] = "eu-central1"
+c.CoreV1().Nodes().Update(context.Background(), node1, metav1.UpdateOptions{})
 },
 expectedMaxItems: 2,
 expectedRes: map[string]*targetgroup.Group{
@@ -572,7 +629,7 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 "__meta_kubernetes_endpoint_port_name": "testport",
 "__meta_kubernetes_endpoint_port_protocol": "TCP",
 "__meta_kubernetes_endpoint_ready": "true",
-"__meta_kubernetes_node_label_az": "eu-central1",
+"__meta_kubernetes_node_label_az": "us-east1",
 "__meta_kubernetes_node_labelpresent_az": "true",
 "__meta_kubernetes_node_name": "foobar",
 },
@@ -588,6 +645,17 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 "__meta_kubernetes_endpoint_port_protocol": "TCP",
 "__meta_kubernetes_endpoint_ready": "false",
 },
+{
+"__address__": "6.7.8.9:9002",
+"__meta_kubernetes_endpoint_address_target_kind": "Node",
+"__meta_kubernetes_endpoint_address_target_name": "barbaz",
+"__meta_kubernetes_endpoint_port_name": "testport",
+"__meta_kubernetes_endpoint_port_protocol": "TCP",
+"__meta_kubernetes_endpoint_ready": "true",
+"__meta_kubernetes_node_label_az": "us-west2",
+"__meta_kubernetes_node_labelpresent_az": "true",
+"__meta_kubernetes_node_name": "barbaz",
+},
 },
 Labels: model.LabelSet{
 "__meta_kubernetes_namespace": "default",
@@ -699,6 +767,14 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) {
 "__meta_kubernetes_endpoint_port_protocol": "TCP",
 "__meta_kubernetes_endpoint_ready": "false",
 },
+{
+"__address__": "6.7.8.9:9002",
+"__meta_kubernetes_endpoint_address_target_kind": "Node",
+"__meta_kubernetes_endpoint_address_target_name": "barbaz",
+"__meta_kubernetes_endpoint_port_name": "testport",
+"__meta_kubernetes_endpoint_port_protocol": "TCP",
+"__meta_kubernetes_endpoint_ready": "true",
+},
 },
 Labels: model.LabelSet{
 "__meta_kubernetes_namespace": "ns1",
@@ -815,6 +891,14 @@ func TestEndpointsDiscoveryOwnNamespace(t *testing.T) {
 "__meta_kubernetes_endpoint_port_protocol": "TCP",
 "__meta_kubernetes_endpoint_ready": "false",
 },
+{
+"__address__": "6.7.8.9:9002",
+"__meta_kubernetes_endpoint_address_target_kind": "Node",
+"__meta_kubernetes_endpoint_address_target_name": "barbaz",
+"__meta_kubernetes_endpoint_port_name": "testport",
+"__meta_kubernetes_endpoint_port_protocol": "TCP",
+"__meta_kubernetes_endpoint_ready": "true",
+},
 },
 Labels: model.LabelSet{
 "__meta_kubernetes_namespace": "own-ns",
@@ -825,3 +909,46 @@ func TestEndpointsDiscoveryOwnNamespace(t *testing.T) {
 },
 }.Run(t)
 }
+
+func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) {
+ep := makeEndpoints()
+ep.Namespace = "ns"
+
+pod := &v1.Pod{
+ObjectMeta: metav1.ObjectMeta{
+Name: "testpod",
+Namespace: "ns",
+UID: types.UID("deadbeef"),
+},
+Spec: v1.PodSpec{
+NodeName: "testnode",
+Containers: []v1.Container{
+{
+Name: "p1",
+Image: "p1:latest",
+Ports: []v1.ContainerPort{
+{
+Name: "mainport",
+ContainerPort: 9000,
+Protocol: v1.ProtocolTCP,
+},
+},
+},
+},
+},
+Status: v1.PodStatus{},
+}
+
+objs := []runtime.Object{
+ep,
+pod,
+}
+
+n, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{IncludeOwnNamespace: true}, objs...)
+
+k8sDiscoveryTest{
+discovery: n,
+expectedMaxItems: 0,
+expectedRes: map[string]*targetgroup.Group{},
+}.Run(t)
+}
@@ -73,7 +73,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
 queue: workqueue.NewNamed("endpointSlice"),
 }
 
-e.endpointSliceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
+_, err := e.endpointSliceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
 AddFunc: func(o interface{}) {
 epslAddCount.Inc()
 e.enqueue(o)
@@ -87,6 +87,9 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
 e.enqueue(o)
 },
 })
+if err != nil {
+level.Error(l).Log("msg", "Error adding endpoint slices event handler.", "err", err)
+}
 
 serviceUpdate := func(o interface{}) {
 svc, err := convertToService(o)
@@ -109,7 +112,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
 }
 }
 }
-e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
+_, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
 AddFunc: func(o interface{}) {
 svcAddCount.Inc()
 serviceUpdate(o)
@@ -123,9 +126,12 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
 serviceUpdate(o)
 },
 })
+if err != nil {
+level.Error(l).Log("msg", "Error adding services event handler.", "err", err)
+}
 
 if e.withNodeMetadata {
-e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
+_, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
 AddFunc: func(o interface{}) {
 node := o.(*apiv1.Node)
 e.enqueueNode(node.Name)
@@ -139,6 +145,9 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
 e.enqueueNode(node.Name)
 },
 })
+if err != nil {
+level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err)
+}
 }
 
 return e
@@ -181,7 +190,7 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group)
 }
 
 go func() {
-for e.process(ctx, ch) {
+for e.process(ctx, ch) { // nolint:revive
 }
 }()
 
@@ -250,6 +259,8 @@ const (
 endpointSlicePortLabel = metaLabelPrefix + "endpointslice_port"
 endpointSlicePortAppProtocol = metaLabelPrefix + "endpointslice_port_app_protocol"
 endpointSliceEndpointConditionsReadyLabel = metaLabelPrefix + "endpointslice_endpoint_conditions_ready"
+endpointSliceEndpointConditionsServingLabel = metaLabelPrefix + "endpointslice_endpoint_conditions_serving"
+endpointSliceEndpointConditionsTerminatingLabel = metaLabelPrefix + "endpointslice_endpoint_conditions_terminating"
 endpointSliceEndpointHostnameLabel = metaLabelPrefix + "endpointslice_endpoint_hostname"
 endpointSliceAddressTargetKindLabel = metaLabelPrefix + "endpointslice_address_target_kind"
 endpointSliceAddressTargetNameLabel = metaLabelPrefix + "endpointslice_address_target_name"
@@ -289,7 +300,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
 }
 
 if port.protocol() != nil {
-target[endpointSlicePortProtocolLabel] = lv(string(*port.protocol()))
+target[endpointSlicePortProtocolLabel] = lv(*port.protocol())
 }
 
 if port.port() != nil {
@@ -304,6 +315,14 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
 target[endpointSliceEndpointConditionsReadyLabel] = lv(strconv.FormatBool(*ep.conditions().ready()))
 }
 
+if ep.conditions().serving() != nil {
+target[endpointSliceEndpointConditionsServingLabel] = lv(strconv.FormatBool(*ep.conditions().serving()))
+}
+
+if ep.conditions().terminating() != nil {
+target[endpointSliceEndpointConditionsTerminatingLabel] = lv(strconv.FormatBool(*ep.conditions().terminating()))
+}
+
 if ep.hostname() != nil {
 target[endpointSliceEndpointHostnameLabel] = lv(*ep.hostname())
 }
@@ -320,7 +339,11 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
 }
 
 if e.withNodeMetadata {
-target = addNodeLabels(target, e.nodeInf, e.logger, ep.nodename())
+if ep.targetRef() != nil && ep.targetRef().Kind == "Node" {
+target = addNodeLabels(target, e.nodeInf, e.logger, &ep.targetRef().Name)
+} else {
+target = addNodeLabels(target, e.nodeInf, e.logger, ep.nodename())
+}
 }
 
 pod := e.resolvePodRef(ep.targetRef())
@@ -393,18 +416,21 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
 continue
 }
 
-a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
-ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)
+// PodIP can be empty when a pod is starting or has been evicted.
+if len(pe.pod.Status.PodIP) != 0 {
+a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
+ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)
 
 target := model.LabelSet{
 model.AddressLabel: lv(a),
 podContainerNameLabel: lv(c.Name),
 podContainerImageLabel: lv(c.Image),
 podContainerPortNameLabel: lv(cport.Name),
 podContainerPortNumberLabel: lv(ports),
 podContainerPortProtocolLabel: lv(string(cport.Protocol)),
+}
+tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
 }
-tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
 }
 }
 }
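For readers skimming the hunks above: the new `endpointslice_endpoint_conditions_serving` and `_terminating` meta labels are only emitted when the corresponding condition pointer is non-nil, mirroring the existing `ready` handling, and pod-derived targets are now only built while `Status.PodIP` is non-empty. A standalone sketch of that nil-pointer-to-label pattern, assuming nothing beyond what the diff shows (the `EndpointConditions` struct here and the helper names are invented for the example; only the label names and `strconv.FormatBool` usage come from the diff):

```go
package main

import (
	"fmt"
	"strconv"
)

// EndpointConditions mirrors the optional-field shape used in the diff:
// every condition is a *bool, so "unset" and "false" are distinct.
type EndpointConditions struct {
	Ready       *bool
	Serving     *bool
	Terminating *bool
}

// conditionLabels copies each condition that is actually set into a label map
// and leaves the label out entirely when the pointer is nil, which is the
// behaviour the buildEndpointSlice hunk implements for the new labels.
func conditionLabels(c EndpointConditions) map[string]string {
	labels := map[string]string{}
	set := func(name string, v *bool) {
		if v != nil {
			labels[name] = strconv.FormatBool(*v)
		}
	}
	set("__meta_kubernetes_endpointslice_endpoint_conditions_ready", c.Ready)
	set("__meta_kubernetes_endpointslice_endpoint_conditions_serving", c.Serving)
	set("__meta_kubernetes_endpointslice_endpoint_conditions_terminating", c.Terminating)
	return labels
}

func boolptr(b bool) *bool { return &b }

func main() {
	// A terminating endpoint: ready=false, serving=true, terminating=true.
	fmt.Println(conditionLabels(EndpointConditions{
		Ready:       boolptr(false),
		Serving:     boolptr(true),
		Terminating: boolptr(true),
	}))
	// Older API objects may omit the new fields; those labels simply stay absent.
	fmt.Println(conditionLabels(EndpointConditions{Ready: boolptr(true)}))
}
```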
@@ -49,6 +49,8 @@ type endpointSliceEndpointAdaptor interface {
 
 type endpointSliceEndpointConditionsAdaptor interface {
 ready() *bool
+serving() *bool
+terminating() *bool
 }
 
 // Adaptor for k8s.io/api/discovery/v1
@@ -193,6 +195,14 @@ func (e *endpointSliceEndpointConditionsAdaptorV1) ready() *bool {
 return e.endpointConditions.Ready
 }
 
+func (e *endpointSliceEndpointConditionsAdaptorV1) serving() *bool {
+return e.endpointConditions.Serving
+}
+
+func (e *endpointSliceEndpointConditionsAdaptorV1) terminating() *bool {
+return e.endpointConditions.Terminating
+}
+
 type endpointSliceEndpointAdaptorV1beta1 struct {
 endpoint v1beta1.Endpoint
 }
@@ -237,6 +247,14 @@ func (e *endpointSliceEndpointConditionsAdaptorV1beta1) ready() *bool {
 return e.endpointConditions.Ready
 }
 
+func (e *endpointSliceEndpointConditionsAdaptorV1beta1) serving() *bool {
+return e.endpointConditions.Serving
+}
+
+func (e *endpointSliceEndpointConditionsAdaptorV1beta1) terminating() *bool {
+return e.endpointConditions.Terminating
+}
+
 type endpointSlicePortAdaptorV1 struct {
 endpointPort v1.EndpointPort
 }
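The adaptor hunks above extend the pattern the file already uses: one small accessor interface per concept, with a `discovery/v1` and a `discovery/v1beta1` implementation behind it, so the discovery code never branches on the API version. A toy, self-contained illustration of that shape, assuming nothing about the real file beyond the `ready`/`serving`/`terminating` accessors shown in the diff (all type and function names below are invented for the example):

```go
package main

import "fmt"

// conditionsAdaptor is the minimal interface the consumer would depend on;
// both API versions satisfy it.
type conditionsAdaptor interface {
	ready() *bool
	serving() *bool
	terminating() *bool
}

// v1Conditions and v1beta1Conditions stand in for the two Kubernetes API
// versions of EndpointConditions.
type v1Conditions struct{ Ready, Serving, Terminating *bool }
type v1beta1Conditions struct{ Ready, Serving, Terminating *bool }

type v1Adaptor struct{ c v1Conditions }
type v1beta1Adaptor struct{ c v1beta1Conditions }

func (a v1Adaptor) ready() *bool            { return a.c.Ready }
func (a v1Adaptor) serving() *bool          { return a.c.Serving }
func (a v1Adaptor) terminating() *bool      { return a.c.Terminating }
func (a v1beta1Adaptor) ready() *bool       { return a.c.Ready }
func (a v1beta1Adaptor) serving() *bool     { return a.c.Serving }
func (a v1beta1Adaptor) terminating() *bool { return a.c.Terminating }

// describe works against the interface only, so adding a new API version
// means adding one more adaptor, not touching the consumer.
func describe(c conditionsAdaptor) string {
	show := func(v *bool) string {
		if v == nil {
			return "unset"
		}
		return fmt.Sprintf("%t", *v)
	}
	return fmt.Sprintf("ready=%s serving=%s terminating=%s",
		show(c.ready()), show(c.serving()), show(c.terminating()))
}

func main() {
	t, f := true, false
	fmt.Println(describe(v1Adaptor{v1Conditions{Ready: &t, Serving: &t, Terminating: &f}}))
	fmt.Println(describe(v1beta1Adaptor{v1beta1Conditions{Ready: &f}}))
}
```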
@@ -35,6 +35,8 @@ func Test_EndpointSliceAdaptor_v1(t *testing.T) {
 require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses())
 require.Equal(t, endpointSlice.Endpoints[i].Hostname, endpointAdaptor.hostname())
 require.Equal(t, endpointSlice.Endpoints[i].Conditions.Ready, endpointAdaptor.conditions().ready())
+require.Equal(t, endpointSlice.Endpoints[i].Conditions.Serving, endpointAdaptor.conditions().serving())
+require.Equal(t, endpointSlice.Endpoints[i].Conditions.Terminating, endpointAdaptor.conditions().terminating())
 require.Equal(t, endpointSlice.Endpoints[i].TargetRef, endpointAdaptor.targetRef())
 require.Equal(t, endpointSlice.Endpoints[i].DeprecatedTopology, endpointAdaptor.topology())
 }
@@ -61,6 +63,8 @@ func Test_EndpointSliceAdaptor_v1beta1(t *testing.T) {
 require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses())
 require.Equal(t, endpointSlice.Endpoints[i].Hostname, endpointAdaptor.hostname())
 require.Equal(t, endpointSlice.Endpoints[i].Conditions.Ready, endpointAdaptor.conditions().ready())
+require.Equal(t, endpointSlice.Endpoints[i].Conditions.Serving, endpointAdaptor.conditions().serving())
+require.Equal(t, endpointSlice.Endpoints[i].Conditions.Terminating, endpointAdaptor.conditions().terminating())
 require.Equal(t, endpointSlice.Endpoints[i].TargetRef, endpointAdaptor.targetRef())
 require.Equal(t, endpointSlice.Endpoints[i].Topology, endpointAdaptor.topology())
 }
@ -64,23 +64,42 @@ func makeEndpointSliceV1() *v1.EndpointSlice {
|
||||||
},
|
},
|
||||||
Endpoints: []v1.Endpoint{
|
Endpoints: []v1.Endpoint{
|
||||||
{
|
{
|
||||||
Addresses: []string{"1.2.3.4"},
|
Addresses: []string{"1.2.3.4"},
|
||||||
Conditions: v1.EndpointConditions{Ready: boolptr(true)},
|
Conditions: v1.EndpointConditions{
|
||||||
Hostname: strptr("testendpoint1"),
|
Ready: boolptr(true),
|
||||||
TargetRef: &corev1.ObjectReference{},
|
Serving: boolptr(true),
|
||||||
NodeName: strptr("foobar"),
|
Terminating: boolptr(false),
|
||||||
|
},
|
||||||
|
Hostname: strptr("testendpoint1"),
|
||||||
|
TargetRef: &corev1.ObjectReference{},
|
||||||
|
NodeName: strptr("foobar"),
|
||||||
DeprecatedTopology: map[string]string{
|
DeprecatedTopology: map[string]string{
|
||||||
"topology": "value",
|
"topology": "value",
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
Addresses: []string{"2.3.4.5"},
|
Addresses: []string{"2.3.4.5"},
|
||||||
Conditions: v1.EndpointConditions{
|
Conditions: v1.EndpointConditions{
|
||||||
Ready: boolptr(true),
|
Ready: boolptr(true),
|
||||||
|
Serving: boolptr(true),
|
||||||
|
Terminating: boolptr(false),
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
Addresses: []string{"3.4.5.6"},
|
Addresses: []string{"3.4.5.6"},
|
||||||
Conditions: v1.EndpointConditions{
|
Conditions: v1.EndpointConditions{
|
||||||
Ready: boolptr(false),
|
Ready: boolptr(false),
|
||||||
|
Serving: boolptr(true),
|
||||||
|
Terminating: boolptr(true),
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
Addresses: []string{"4.5.6.7"},
|
||||||
|
Conditions: v1.EndpointConditions{
|
||||||
|
Ready: boolptr(true),
|
||||||
|
Serving: boolptr(true),
|
||||||
|
Terminating: boolptr(false),
|
||||||
|
},
|
||||||
|
TargetRef: &corev1.ObjectReference{
|
||||||
|
Kind: "Node",
|
||||||
|
Name: "barbaz",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -111,12 +130,27 @@ func makeEndpointSliceV1beta1() *v1beta1.EndpointSlice {
|
||||||
}, {
|
}, {
|
||||||
Addresses: []string{"2.3.4.5"},
|
Addresses: []string{"2.3.4.5"},
|
||||||
Conditions: v1beta1.EndpointConditions{
|
Conditions: v1beta1.EndpointConditions{
|
||||||
Ready: boolptr(true),
|
Ready: boolptr(true),
|
||||||
|
Serving: boolptr(true),
|
||||||
|
Terminating: boolptr(false),
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
Addresses: []string{"3.4.5.6"},
|
Addresses: []string{"3.4.5.6"},
|
||||||
Conditions: v1beta1.EndpointConditions{
|
Conditions: v1beta1.EndpointConditions{
|
||||||
Ready: boolptr(false),
|
Ready: boolptr(false),
|
||||||
|
Serving: boolptr(true),
|
||||||
|
Terminating: boolptr(true),
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
Addresses: []string{"4.5.6.7"},
|
||||||
|
Conditions: v1beta1.EndpointConditions{
|
||||||
|
Ready: boolptr(true),
|
||||||
|
Serving: boolptr(true),
|
||||||
|
Terminating: boolptr(false),
|
||||||
|
},
|
||||||
|
TargetRef: &corev1.ObjectReference{
|
||||||
|
Kind: "Node",
|
||||||
|
Name: "barbaz",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -141,6 +175,8 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
|
||||||
"__meta_kubernetes_endpointslice_address_target_kind": "",
|
"__meta_kubernetes_endpointslice_address_target_kind": "",
|
||||||
"__meta_kubernetes_endpointslice_address_target_name": "",
|
"__meta_kubernetes_endpointslice_address_target_name": "",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
|
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
|
"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
|
"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
|
||||||
|
@ -151,19 +187,35 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"__address__": "2.3.4.5:9000",
|
"__address__": "2.3.4.5:9000",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
"__meta_kubernetes_endpointslice_port": "9000",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"__address__": "3.4.5.6:9000",
|
"__address__": "3.4.5.6:9000",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
|
||||||
"__meta_kubernetes_endpointslice_port": "9000",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "4.5.6.7:9000",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Labels: model.LabelSet{
|
Labels: model.LabelSet{
|
||||||
|
@ -199,17 +251,32 @@ func TestEndpointSliceDiscoveryBeforeRunV1beta1(t *testing.T) {
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"__address__": "2.3.4.5:9000",
|
"__address__": "2.3.4.5:9000",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
"__meta_kubernetes_endpointslice_port": "9000",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"__address__": "3.4.5.6:9000",
|
"__address__": "3.4.5.6:9000",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
|
||||||
"__meta_kubernetes_endpointslice_port": "9000",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "4.5.6.7:9000",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Labels: model.LabelSet{
|
Labels: model.LabelSet{
|
||||||
|
@ -367,6 +434,8 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) {
|
||||||
"__meta_kubernetes_endpointslice_address_target_kind": "",
|
"__meta_kubernetes_endpointslice_address_target_kind": "",
|
||||||
"__meta_kubernetes_endpointslice_address_target_name": "",
|
"__meta_kubernetes_endpointslice_address_target_name": "",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
|
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
|
"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
|
"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
|
||||||
|
@ -377,19 +446,35 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) {
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"__address__": "2.3.4.5:9000",
|
"__address__": "2.3.4.5:9000",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
"__meta_kubernetes_endpointslice_port": "9000",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"__address__": "3.4.5.6:9000",
|
"__address__": "3.4.5.6:9000",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
|
||||||
"__meta_kubernetes_endpointslice_port": "9000",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "4.5.6.7:9000",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Labels: map[model.LabelName]model.LabelValue{
|
Labels: map[model.LabelName]model.LabelValue{
|
||||||
|
@ -445,6 +530,8 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
|
||||||
"__meta_kubernetes_endpointslice_address_target_kind": "",
|
"__meta_kubernetes_endpointslice_address_target_kind": "",
|
||||||
"__meta_kubernetes_endpointslice_address_target_name": "",
|
"__meta_kubernetes_endpointslice_address_target_name": "",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
|
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
|
"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
|
"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
|
||||||
|
@ -455,19 +542,35 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"__address__": "2.3.4.5:9000",
|
"__address__": "2.3.4.5:9000",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
"__meta_kubernetes_endpointslice_port": "9000",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"__address__": "3.4.5.6:9000",
|
"__address__": "3.4.5.6:9000",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
|
||||||
"__meta_kubernetes_endpointslice_port": "9000",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "4.5.6.7:9000",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Labels: model.LabelSet{
|
Labels: model.LabelSet{
|
||||||
|
@ -512,6 +615,8 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
|
||||||
"__meta_kubernetes_endpointslice_address_target_kind": "",
|
"__meta_kubernetes_endpointslice_address_target_kind": "",
|
||||||
"__meta_kubernetes_endpointslice_address_target_name": "",
|
"__meta_kubernetes_endpointslice_address_target_name": "",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
|
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
|
"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
|
"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
|
||||||
|
@ -522,19 +627,35 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"__address__": "2.3.4.5:9000",
|
"__address__": "2.3.4.5:9000",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
"__meta_kubernetes_endpointslice_port": "9000",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"__address__": "3.4.5.6:9000",
|
"__address__": "3.4.5.6:9000",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
|
||||||
"__meta_kubernetes_endpointslice_port": "9000",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "4.5.6.7:9000",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Labels: model.LabelSet{
|
Labels: model.LabelSet{
|
||||||
|
@ -574,6 +695,8 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) {
|
||||||
"__meta_kubernetes_endpointslice_address_target_kind": "",
|
"__meta_kubernetes_endpointslice_address_target_kind": "",
|
||||||
"__meta_kubernetes_endpointslice_address_target_name": "",
|
"__meta_kubernetes_endpointslice_address_target_name": "",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
|
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
|
"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
|
"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
|
||||||
|
@ -584,19 +707,35 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) {
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"__address__": "2.3.4.5:9000",
|
"__address__": "2.3.4.5:9000",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
"__meta_kubernetes_endpointslice_port": "9000",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"__address__": "3.4.5.6:9000",
|
"__address__": "3.4.5.6:9000",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
|
||||||
"__meta_kubernetes_endpointslice_port": "9000",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "4.5.6.7:9000",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Labels: model.LabelSet{
|
Labels: model.LabelSet{
|
||||||
|
@ -652,6 +791,8 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
|
||||||
"__meta_kubernetes_endpointslice_address_target_kind": "",
|
"__meta_kubernetes_endpointslice_address_target_kind": "",
|
||||||
"__meta_kubernetes_endpointslice_address_target_name": "",
|
"__meta_kubernetes_endpointslice_address_target_name": "",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
|
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
|
"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
|
"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
|
||||||
|
@ -662,19 +803,35 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"__address__": "2.3.4.5:9000",
|
"__address__": "2.3.4.5:9000",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
"__meta_kubernetes_endpointslice_port": "9000",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"__address__": "3.4.5.6:9000",
|
"__address__": "3.4.5.6:9000",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
|
||||||
"__meta_kubernetes_endpointslice_port": "9000",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "4.5.6.7:9000",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Labels: model.LabelSet{
|
Labels: model.LabelSet{
|
||||||
|
@ -695,7 +852,8 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
|
||||||
|
|
||||||
func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
|
func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
|
||||||
metadataConfig := AttachMetadataConfig{Node: true}
|
metadataConfig := AttachMetadataConfig{Node: true}
|
||||||
nodeLabels := map[string]string{"az": "us-east1"}
|
nodeLabels1 := map[string]string{"az": "us-east1"}
|
||||||
|
nodeLabels2 := map[string]string{"az": "us-west2"}
|
||||||
svc := &corev1.Service{
|
svc := &corev1.Service{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Name: "testendpoints",
|
Name: "testendpoints",
|
||||||
|
@ -705,7 +863,7 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
objs := []runtime.Object{makeEndpointSliceV1(), makeNode("foobar", "", "", nodeLabels, nil), svc}
|
objs := []runtime.Object{makeEndpointSliceV1(), makeNode("foobar", "", "", nodeLabels1, nil), makeNode("barbaz", "", "", nodeLabels2, nil), svc}
|
||||||
n, _ := makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, metadataConfig, objs...)
|
n, _ := makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, metadataConfig, objs...)
|
||||||
|
|
||||||
k8sDiscoveryTest{
|
k8sDiscoveryTest{
|
||||||
|
@ -719,6 +877,8 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
|
||||||
"__meta_kubernetes_endpointslice_address_target_kind": "",
|
"__meta_kubernetes_endpointslice_address_target_kind": "",
|
||||||
"__meta_kubernetes_endpointslice_address_target_name": "",
|
"__meta_kubernetes_endpointslice_address_target_name": "",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
|
"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
|
"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
|
"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
|
||||||
|
@ -732,19 +892,38 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"__address__": "2.3.4.5:9000",
|
"__address__": "2.3.4.5:9000",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
"__meta_kubernetes_endpointslice_port": "9000",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"__address__": "3.4.5.6:9000",
|
"__address__": "3.4.5.6:9000",
|
||||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
|
||||||
"__meta_kubernetes_endpointslice_port": "9000",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
|
||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "4.5.6.7:9000",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
|
"__meta_kubernetes_node_label_az": "us-west2",
|
||||||
|
"__meta_kubernetes_node_labelpresent_az": "true",
|
||||||
|
"__meta_kubernetes_node_name": "barbaz",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Labels: model.LabelSet{
|
Labels: model.LabelSet{
|
||||||
|
@ -763,7 +942,8 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
|
||||||
|
|
||||||
func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
|
func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
|
||||||
metadataConfig := AttachMetadataConfig{Node: true}
|
metadataConfig := AttachMetadataConfig{Node: true}
|
||||||
nodeLabels := map[string]string{"az": "us-east1"}
|
nodeLabels1 := map[string]string{"az": "us-east1"}
|
||||||
|
nodeLabels2 := map[string]string{"az": "us-west2"}
|
||||||
svc := &corev1.Service{
|
svc := &corev1.Service{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Name: "testendpoints",
|
Name: "testendpoints",
|
||||||
|
@ -773,16 +953,17 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
node := makeNode("foobar", "", "", nodeLabels, nil)
|
node1 := makeNode("foobar", "", "", nodeLabels1, nil)
|
||||||
objs := []runtime.Object{makeEndpointSliceV1(), node, svc}
|
node2 := makeNode("barbaz", "", "", nodeLabels2, nil)
|
||||||
|
objs := []runtime.Object{makeEndpointSliceV1(), node1, node2, svc}
|
||||||
n, c := makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, metadataConfig, objs...)
|
n, c := makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, metadataConfig, objs...)
|
||||||
|
|
||||||
k8sDiscoveryTest{
|
k8sDiscoveryTest{
|
||||||
discovery: n,
|
discovery: n,
|
||||||
expectedMaxItems: 2,
|
expectedMaxItems: 2,
|
||||||
afterStart: func() {
|
afterStart: func() {
|
||||||
node.Labels["az"] = "us-central1"
|
node1.Labels["az"] = "us-central1"
|
||||||
c.CoreV1().Nodes().Update(context.Background(), node, metav1.UpdateOptions{})
|
c.CoreV1().Nodes().Update(context.Background(), node1, metav1.UpdateOptions{})
|
||||||
},
|
},
|
||||||
expectedRes: map[string]*targetgroup.Group{
|
expectedRes: map[string]*targetgroup.Group{
|
||||||
"endpointslice/default/testendpoints": {
|
"endpointslice/default/testendpoints": {
|
||||||
|
@@ -792,6 +973,8 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 "__meta_kubernetes_endpointslice_address_target_kind": "",
 "__meta_kubernetes_endpointslice_address_target_name": "",
 "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
 "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
 "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
 "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
@@ -799,25 +982,44 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 "__meta_kubernetes_endpointslice_port_app_protocol": "http",
 "__meta_kubernetes_endpointslice_port_name": "testport",
 "__meta_kubernetes_endpointslice_port_protocol": "TCP",
-"__meta_kubernetes_node_label_az": "us-central1",
+"__meta_kubernetes_node_label_az": "us-east1",
 "__meta_kubernetes_node_labelpresent_az": "true",
 "__meta_kubernetes_node_name": "foobar",
 },
 {
 "__address__": "2.3.4.5:9000",
 "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
-"__meta_kubernetes_endpointslice_port": "9000",
-"__meta_kubernetes_endpointslice_port_app_protocol": "http",
-"__meta_kubernetes_endpointslice_port_name": "testport",
-"__meta_kubernetes_endpointslice_port_protocol": "TCP",
+"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+"__meta_kubernetes_endpointslice_port": "9000",
+"__meta_kubernetes_endpointslice_port_app_protocol": "http",
+"__meta_kubernetes_endpointslice_port_name": "testport",
+"__meta_kubernetes_endpointslice_port_protocol": "TCP",
 },
 {
 "__address__": "3.4.5.6:9000",
 "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
-"__meta_kubernetes_endpointslice_port": "9000",
-"__meta_kubernetes_endpointslice_port_app_protocol": "http",
-"__meta_kubernetes_endpointslice_port_name": "testport",
-"__meta_kubernetes_endpointslice_port_protocol": "TCP",
+"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
+"__meta_kubernetes_endpointslice_port": "9000",
+"__meta_kubernetes_endpointslice_port_app_protocol": "http",
+"__meta_kubernetes_endpointslice_port_name": "testport",
+"__meta_kubernetes_endpointslice_port_protocol": "TCP",
+},
+{
+"__address__": "4.5.6.7:9000",
+"__meta_kubernetes_endpointslice_address_target_kind": "Node",
+"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+"__meta_kubernetes_endpointslice_port": "9000",
+"__meta_kubernetes_endpointslice_port_app_protocol": "http",
+"__meta_kubernetes_endpointslice_port_name": "testport",
+"__meta_kubernetes_endpointslice_port_protocol": "TCP",
+"__meta_kubernetes_node_label_az": "us-west2",
+"__meta_kubernetes_node_labelpresent_az": "true",
+"__meta_kubernetes_node_name": "barbaz",
 },
 },
 Labels: model.LabelSet{
@@ -913,6 +1115,8 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
 "__meta_kubernetes_endpointslice_address_target_kind": "",
 "__meta_kubernetes_endpointslice_address_target_name": "",
 "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
 "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
 "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
 "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
@@ -923,19 +1127,35 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
 },
 {
 "__address__": "2.3.4.5:9000",
 "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
-"__meta_kubernetes_endpointslice_port": "9000",
-"__meta_kubernetes_endpointslice_port_name": "testport",
-"__meta_kubernetes_endpointslice_port_protocol": "TCP",
-"__meta_kubernetes_endpointslice_port_app_protocol": "http",
+"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+"__meta_kubernetes_endpointslice_port": "9000",
+"__meta_kubernetes_endpointslice_port_name": "testport",
+"__meta_kubernetes_endpointslice_port_protocol": "TCP",
+"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 },
 {
 "__address__": "3.4.5.6:9000",
 "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
-"__meta_kubernetes_endpointslice_port": "9000",
-"__meta_kubernetes_endpointslice_port_name": "testport",
-"__meta_kubernetes_endpointslice_port_protocol": "TCP",
-"__meta_kubernetes_endpointslice_port_app_protocol": "http",
+"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
+"__meta_kubernetes_endpointslice_port": "9000",
+"__meta_kubernetes_endpointslice_port_name": "testport",
+"__meta_kubernetes_endpointslice_port_protocol": "TCP",
+"__meta_kubernetes_endpointslice_port_app_protocol": "http",
+},
+{
+"__address__": "4.5.6.7:9000",
+"__meta_kubernetes_endpointslice_address_target_kind": "Node",
+"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+"__meta_kubernetes_endpointslice_port": "9000",
+"__meta_kubernetes_endpointslice_port_app_protocol": "http",
+"__meta_kubernetes_endpointslice_port_name": "testport",
+"__meta_kubernetes_endpointslice_port_protocol": "TCP",
 },
 },
 Labels: model.LabelSet{
@@ -1039,6 +1259,8 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) {
 "__meta_kubernetes_endpointslice_address_target_kind": "",
 "__meta_kubernetes_endpointslice_address_target_name": "",
 "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
 "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
 "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
 "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
@@ -1049,19 +1271,35 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) {
 },
 {
 "__address__": "2.3.4.5:9000",
 "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
-"__meta_kubernetes_endpointslice_port": "9000",
-"__meta_kubernetes_endpointslice_port_name": "testport",
-"__meta_kubernetes_endpointslice_port_protocol": "TCP",
-"__meta_kubernetes_endpointslice_port_app_protocol": "http",
+"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+"__meta_kubernetes_endpointslice_port": "9000",
+"__meta_kubernetes_endpointslice_port_name": "testport",
+"__meta_kubernetes_endpointslice_port_protocol": "TCP",
+"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 },
 {
 "__address__": "3.4.5.6:9000",
 "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
-"__meta_kubernetes_endpointslice_port": "9000",
-"__meta_kubernetes_endpointslice_port_name": "testport",
-"__meta_kubernetes_endpointslice_port_protocol": "TCP",
-"__meta_kubernetes_endpointslice_port_app_protocol": "http",
+"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
+"__meta_kubernetes_endpointslice_port": "9000",
+"__meta_kubernetes_endpointslice_port_name": "testport",
+"__meta_kubernetes_endpointslice_port_protocol": "TCP",
+"__meta_kubernetes_endpointslice_port_app_protocol": "http",
+},
+{
+"__address__": "4.5.6.7:9000",
+"__meta_kubernetes_endpointslice_address_target_kind": "Node",
+"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+"__meta_kubernetes_endpointslice_port": "9000",
+"__meta_kubernetes_endpointslice_port_app_protocol": "http",
+"__meta_kubernetes_endpointslice_port_name": "testport",
+"__meta_kubernetes_endpointslice_port_protocol": "TCP",
 },
 },
 Labels: model.LabelSet{
@@ -1074,3 +1312,46 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) {
 },
 }.Run(t)
 }
+
+func TestEndpointSliceDiscoveryEmptyPodStatus(t *testing.T) {
+ep := makeEndpointSliceV1()
+ep.Namespace = "ns"
+
+pod := &corev1.Pod{
+ObjectMeta: metav1.ObjectMeta{
+Name: "testpod",
+Namespace: "ns",
+UID: types.UID("deadbeef"),
+},
+Spec: corev1.PodSpec{
+NodeName: "testnode",
+Containers: []corev1.Container{
+{
+Name: "p1",
+Image: "p1:latest",
+Ports: []corev1.ContainerPort{
+{
+Name: "mainport",
+ContainerPort: 9000,
+Protocol: corev1.ProtocolTCP,
+},
+},
+},
+},
+},
+Status: corev1.PodStatus{},
+}
+
+objs := []runtime.Object{
+ep,
+pod,
+}
+
+n, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{IncludeOwnNamespace: true}, objs...)
+
+k8sDiscoveryTest{
+discovery: n,
+expectedMaxItems: 0,
+expectedRes: map[string]*targetgroup.Group{},
+}.Run(t)
+}
@@ -48,7 +48,7 @@ type Ingress struct {
 // NewIngress returns a new ingress discovery.
 func NewIngress(l log.Logger, inf cache.SharedInformer) *Ingress {
 s := &Ingress{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed("ingress")}
-s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
+_, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
 AddFunc: func(o interface{}) {
 ingressAddCount.Inc()
 s.enqueue(o)
@@ -62,6 +62,9 @@ func NewIngress(l log.Logger, inf cache.SharedInformer) *Ingress {
 s.enqueue(o)
 },
 })
+if err != nil {
+level.Error(l).Log("msg", "Error adding ingresses event handler.", "err", err)
+}
 return s
 }

@@ -86,7 +89,7 @@ func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 }

 go func() {
-for i.process(ctx, ch) {
+for i.process(ctx, ch) { // nolint:revive
 }
 }()

@@ -299,12 +299,13 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
 err error
 ownNamespace string
 )
-if conf.KubeConfig != "" {
+switch {
+case conf.KubeConfig != "":
 kcfg, err = clientcmd.BuildConfigFromFlags("", conf.KubeConfig)
 if err != nil {
 return nil, err
 }
-} else if conf.APIServer.URL == nil {
+case conf.APIServer.URL == nil:
 // Use the Kubernetes provided pod service account
 // as described in https://kubernetes.io/docs/admin/service-accounts-admin/
 kcfg, err = rest.InClusterConfig()
@@ -324,7 +325,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
 }

 level.Info(l).Log("msg", "Using pod service account via in-cluster config")
-} else {
+default:
 rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd")
 if err != nil {
 return nil, err
@@ -382,7 +383,8 @@ func mapSelector(rawSelector []SelectorConfig) roleSelector {
 return rs
 }

-const resyncPeriod = 10 * time.Minute
+// Disable the informer's resync, which just periodically resends already processed updates and distort SD metrics.
+const resyncDisabled = 0

 // Run implements the discoverer interface.
 func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
@@ -475,8 +477,8 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 eps := NewEndpointSlice(
 log.With(d.logger, "role", "endpointslice"),
 informer,
-cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod),
-cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncPeriod),
+cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
+cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
 nodeInf,
 )
 d.discoverers = append(d.discoverers, eps)
@@ -534,8 +536,8 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 eps := NewEndpoints(
 log.With(d.logger, "role", "endpoint"),
 d.newEndpointsByNodeInformer(elw),
-cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod),
-cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncPeriod),
+cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
+cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
 nodeInf,
 )
 d.discoverers = append(d.discoverers, eps)
@@ -589,7 +591,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 }
 svc := NewService(
 log.With(d.logger, "role", "service"),
-cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod),
+cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
 )
 d.discoverers = append(d.discoverers, svc)
 go svc.informer.Run(ctx.Done())
@@ -627,7 +629,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 return i.Watch(ctx, options)
 },
 }
-informer = cache.NewSharedInformer(ilw, &networkv1.Ingress{}, resyncPeriod)
+informer = cache.NewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled)
 } else {
 i := d.client.NetworkingV1beta1().Ingresses(namespace)
 ilw := &cache.ListWatch{
@@ -642,7 +644,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 return i.Watch(ctx, options)
 },
 }
-informer = cache.NewSharedInformer(ilw, &v1beta1.Ingress{}, resyncPeriod)
+informer = cache.NewSharedInformer(ilw, &v1beta1.Ingress{}, resyncDisabled)
 }
 ingress := NewIngress(
 log.With(d.logger, "role", "ingress"),
@@ -732,7 +734,7 @@ func (d *Discovery) newNodeInformer(ctx context.Context) cache.SharedInformer {
 return d.client.CoreV1().Nodes().Watch(ctx, options)
 },
 }
-return cache.NewSharedInformer(nlw, &apiv1.Node{}, resyncPeriod)
+return cache.NewSharedInformer(nlw, &apiv1.Node{}, resyncDisabled)
 }

 func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedIndexInformer {
@@ -747,39 +749,45 @@ func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedInde
 }
 }

-return cache.NewSharedIndexInformer(plw, &apiv1.Pod{}, resyncPeriod, indexers)
+return cache.NewSharedIndexInformer(plw, &apiv1.Pod{}, resyncDisabled, indexers)
 }

 func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.SharedIndexInformer {
 indexers := make(map[string]cache.IndexFunc)
 if !d.attachMetadata.Node {
-return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncPeriod, indexers)
+return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers)
 }

 indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
 e, ok := obj.(*apiv1.Endpoints)
 if !ok {
-return nil, fmt.Errorf("object is not a pod")
+return nil, fmt.Errorf("object is not endpoints")
 }
 var nodes []string
 for _, target := range e.Subsets {
 for _, addr := range target.Addresses {
-if addr.NodeName == nil {
-continue
+if addr.TargetRef != nil {
+switch addr.TargetRef.Kind {
+case "Pod":
+if addr.NodeName != nil {
+nodes = append(nodes, *addr.NodeName)
+}
+case "Node":
+nodes = append(nodes, addr.TargetRef.Name)
+}
 }
-nodes = append(nodes, *addr.NodeName)
 }
 }
 return nodes, nil
 }

-return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncPeriod, indexers)
+return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers)
 }

 func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object runtime.Object) cache.SharedIndexInformer {
 indexers := make(map[string]cache.IndexFunc)
 if !d.attachMetadata.Node {
-cache.NewSharedIndexInformer(plw, &disv1.EndpointSlice{}, resyncPeriod, indexers)
+cache.NewSharedIndexInformer(plw, &disv1.EndpointSlice{}, resyncDisabled, indexers)
 }

 indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
@@ -787,17 +795,29 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object
 switch e := obj.(type) {
 case *disv1.EndpointSlice:
 for _, target := range e.Endpoints {
-if target.NodeName == nil {
-continue
+if target.TargetRef != nil {
+switch target.TargetRef.Kind {
+case "Pod":
+if target.NodeName != nil {
+nodes = append(nodes, *target.NodeName)
+}
+case "Node":
+nodes = append(nodes, target.TargetRef.Name)
+}
 }
-nodes = append(nodes, *target.NodeName)
 }
 case *disv1beta1.EndpointSlice:
 for _, target := range e.Endpoints {
-if target.NodeName == nil {
-continue
+if target.TargetRef != nil {
+switch target.TargetRef.Kind {
+case "Pod":
+if target.NodeName != nil {
+nodes = append(nodes, *target.NodeName)
+}
+case "Node":
+nodes = append(nodes, target.TargetRef.Name)
+}
 }
-nodes = append(nodes, *target.NodeName)
 }
 default:
 return nil, fmt.Errorf("object is not an endpointslice")
@@ -806,7 +826,7 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object
 return nodes, nil
 }

-return cache.NewSharedIndexInformer(plw, object, resyncPeriod, indexers)
+return cache.NewSharedIndexInformer(plw, object, resyncDisabled, indexers)
 }

 func checkDiscoveryV1Supported(client kubernetes.Interface) (bool, error) {
@@ -55,7 +55,7 @@ func NewNode(l log.Logger, inf cache.SharedInformer) *Node {
 l = log.NewNopLogger()
 }
 n := &Node{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed("node")}
-n.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
+_, err := n.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
 AddFunc: func(o interface{}) {
 nodeAddCount.Inc()
 n.enqueue(o)
@@ -69,6 +69,9 @@ func NewNode(l log.Logger, inf cache.SharedInformer) *Node {
 n.enqueue(o)
 },
 })
+if err != nil {
+level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err)
+}
 return n
 }

@@ -93,7 +96,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 }

 go func() {
-for n.process(ctx, ch) {
+for n.process(ctx, ch) { // nolint:revive
 }
 }()

@@ -206,7 +209,7 @@ func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group {
 return tg
 }

-// nodeAddresses returns the provided node's address, based on the priority:
+// nodeAddress returns the provided node's address, based on the priority:
 // 1. NodeInternalIP
 // 2. NodeInternalDNS
 // 3. NodeExternalIP
@@ -65,7 +65,7 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo
 logger: l,
 queue: workqueue.NewNamed("pod"),
 }
-p.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
+_, err := p.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
 AddFunc: func(o interface{}) {
 podAddCount.Inc()
 p.enqueue(o)
@@ -79,9 +79,12 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo
 p.enqueue(o)
 },
 })
+if err != nil {
+level.Error(l).Log("msg", "Error adding pods event handler.", "err", err)
+}

 if p.withNodeMetadata {
-p.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
+_, err = p.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
 AddFunc: func(o interface{}) {
 node := o.(*apiv1.Node)
 p.enqueuePodsForNode(node.Name)
@@ -95,6 +98,9 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo
 p.enqueuePodsForNode(node.Name)
 },
 })
+if err != nil {
+level.Error(l).Log("msg", "Error adding pods event handler.", "err", err)
+}
 }

 return p
@@ -126,7 +132,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 }

 go func() {
-for p.process(ctx, ch) {
+for p.process(ctx, ch) { // nolint:revive
 }
 }()

@@ -177,6 +183,7 @@ const (
 podNameLabel = metaLabelPrefix + "pod_name"
 podIPLabel = metaLabelPrefix + "pod_ip"
 podContainerNameLabel = metaLabelPrefix + "pod_container_name"
+podContainerIDLabel = metaLabelPrefix + "pod_container_id"
 podContainerImageLabel = metaLabelPrefix + "pod_container_image"
 podContainerPortNameLabel = metaLabelPrefix + "pod_container_port_name"
 podContainerPortNumberLabel = metaLabelPrefix + "pod_container_port_number"
@@ -242,6 +249,24 @@ func podLabels(pod *apiv1.Pod) model.LabelSet {
 return ls
 }

+func (p *Pod) findPodContainerStatus(statuses *[]apiv1.ContainerStatus, containerName string) (*apiv1.ContainerStatus, error) {
+for _, s := range *statuses {
+if s.Name == containerName {
+return &s, nil
+}
+}
+return nil, fmt.Errorf("cannot find container with name %v", containerName)
+}
+
+func (p *Pod) findPodContainerID(statuses *[]apiv1.ContainerStatus, containerName string) string {
+cStatus, err := p.findPodContainerStatus(statuses, containerName)
+if err != nil {
+level.Debug(p.logger).Log("msg", "cannot find container ID", "err", err)
+return ""
+}
+return cStatus.ContainerID
+}
+
 func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group {
 tg := &targetgroup.Group{
 Source: podSource(pod),
@@ -261,6 +286,12 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group {
 for i, c := range containers {
 isInit := i >= len(pod.Spec.Containers)
+
+cStatuses := &pod.Status.ContainerStatuses
+if isInit {
+cStatuses = &pod.Status.InitContainerStatuses
+}
+cID := p.findPodContainerID(cStatuses, c.Name)

 // If no ports are defined for the container, create an anonymous
 // target per container.
 if len(c.Ports) == 0 {
@@ -269,6 +300,7 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group {
 tg.Targets = append(tg.Targets, model.LabelSet{
 model.AddressLabel: lv(pod.Status.PodIP),
 podContainerNameLabel: lv(c.Name),
+podContainerIDLabel: lv(cID),
 podContainerImageLabel: lv(c.Image),
 podContainerIsInit: lv(strconv.FormatBool(isInit)),
 })
@@ -282,6 +314,7 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group {
 tg.Targets = append(tg.Targets, model.LabelSet{
 model.AddressLabel: lv(addr),
 podContainerNameLabel: lv(c.Name),
+podContainerIDLabel: lv(cID),
 podContainerImageLabel: lv(c.Image),
 podContainerPortNumberLabel: lv(ports),
 podContainerPortNameLabel: lv(port.Name),
@@ -81,6 +81,16 @@ func makeMultiPortPods() *v1.Pod {
 Status: v1.ConditionTrue,
 },
 },
+ContainerStatuses: []v1.ContainerStatus{
+{
+Name: "testcontainer0",
+ContainerID: "docker://a1b2c3d4e5f6",
+},
+{
+Name: "testcontainer1",
+ContainerID: "containerd://6f5e4d3c2b1a",
+},
+},
 },
 }
 }
@@ -118,6 +128,12 @@ func makePods() *v1.Pod {
 Status: v1.ConditionTrue,
 },
 },
+ContainerStatuses: []v1.ContainerStatus{
+{
+Name: "testcontainer",
+ContainerID: "docker://a1b2c3d4e5f6",
+},
+},
 },
 }
 }
@@ -162,6 +178,18 @@ func makeInitContainerPods() *v1.Pod {
 Status: v1.ConditionFalse,
 },
 },
+ContainerStatuses: []v1.ContainerStatus{
+{
+Name: "testcontainer",
+ContainerID: "docker://a1b2c3d4e5f6",
+},
+},
+InitContainerStatuses: []v1.ContainerStatus{
+{
+Name: "initcontainer",
+ContainerID: "containerd://6f5e4d3c2b1a",
+},
+},
 },
 }
 }
@@ -179,6 +207,7 @@ func expectedPodTargetGroups(ns string) map[string]*targetgroup.Group {
 "__meta_kubernetes_pod_container_port_number": "9000",
 "__meta_kubernetes_pod_container_port_protocol": "TCP",
 "__meta_kubernetes_pod_container_init": "false",
+"__meta_kubernetes_pod_container_id": "docker://a1b2c3d4e5f6",
 },
 },
 Labels: model.LabelSet{
@@ -230,6 +259,7 @@ func TestPodDiscoveryBeforeRun(t *testing.T) {
 "__meta_kubernetes_pod_container_port_number": "9000",
 "__meta_kubernetes_pod_container_port_protocol": "TCP",
 "__meta_kubernetes_pod_container_init": "false",
+"__meta_kubernetes_pod_container_id": "docker://a1b2c3d4e5f6",
 },
 {
 "__address__": "1.2.3.4:9001",
@@ -239,12 +269,14 @@ func TestPodDiscoveryBeforeRun(t *testing.T) {
 "__meta_kubernetes_pod_container_port_number": "9001",
 "__meta_kubernetes_pod_container_port_protocol": "UDP",
 "__meta_kubernetes_pod_container_init": "false",
+"__meta_kubernetes_pod_container_id": "docker://a1b2c3d4e5f6",
 },
 {
 "__address__": "1.2.3.4",
 "__meta_kubernetes_pod_container_name": "testcontainer1",
 "__meta_kubernetes_pod_container_image": "testcontainer1:latest",
 "__meta_kubernetes_pod_container_init": "false",
+"__meta_kubernetes_pod_container_id": "containerd://6f5e4d3c2b1a",
 },
 },
 Labels: model.LabelSet{
@@ -280,6 +312,7 @@ func TestPodDiscoveryInitContainer(t *testing.T) {
 "__meta_kubernetes_pod_container_name": "initcontainer",
 "__meta_kubernetes_pod_container_image": "initcontainer:latest",
 "__meta_kubernetes_pod_container_init": "true",
+"__meta_kubernetes_pod_container_id": "containerd://6f5e4d3c2b1a",
 })
 expected[key].Labels["__meta_kubernetes_pod_phase"] = "Pending"
 expected[key].Labels["__meta_kubernetes_pod_ready"] = "false"
@@ -51,7 +51,7 @@ func NewService(l log.Logger, inf cache.SharedInformer) *Service {
 l = log.NewNopLogger()
 }
 s := &Service{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed("service")}
-s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
+_, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
 AddFunc: func(o interface{}) {
 svcAddCount.Inc()
 s.enqueue(o)
@@ -65,6 +65,9 @@ func NewService(l log.Logger, inf cache.SharedInformer) *Service {
 s.enqueue(o)
 },
 })
+if err != nil {
+level.Error(l).Log("msg", "Error adding services event handler.", "err", err)
+}
 return s
 }

@@ -89,7 +92,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 }

 go func() {
-for s.process(ctx, ch) {
+for s.process(ctx, ch) { // nolint:revive
 }
 }()

Some files were not shown because too many files have changed in this diff.