diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index a6df329beb..b75a7896ee 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -2,6 +2,6 @@ /web/ui/module @juliusv @nexucis /storage/remote @csmarchbanks @cstyan @bwplotka @tomwilkie /discovery/kubernetes @brancz -/tsdb @codesome -/promql @codesome @roidelapluie +/tsdb @jesusvazquez +/promql @roidelapluie /cmd/promtool @dgl diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index cf839b560f..0000000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project. -title: '' -labels: '' -assignees: '' ---- - - -## Proposal -**Use case. Why is this important?** - -*“Nice to have” is not a good use case. :)* diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 0000000000..40f6f1388c --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,23 @@ +--- +name: Feature request +description: Suggest an idea for this project. +body: + - type: markdown + attributes: + value: >- + Please do *NOT* ask support questions in Github issues. + + + If your issue is not a feature request or bug report use + our [community support](https://prometheus.io/community/). + + + There is also [commercial + support](https://prometheus.io/support-training/) available. + - type: textarea + attributes: + label: Proposal + description: Use case. Why is this important? + placeholder: “Nice to have” is not a good use case. :) + validations: + required: true diff --git a/.github/actions/build/action.yml b/.github/actions/build/action.yml deleted file mode 100644 index 7b8e3605f0..0000000000 --- a/.github/actions/build/action.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Build -inputs: - thread: - type: integer - description: Current thread - required: true - default: 3 - parallelism: - type: integer - description: Number of builds to do in parallel - default: 3 - promu_opts: - type: string - description: Options to pass to promu -runs: - using: composite - steps: - - uses: ./.github/actions/setup_environment - - run: ~/go/bin/promu crossbuild -v --parallelism ${{ inputs.parallelism }} --parallelism-thread ${{ inputs.thread }} ${{ inputs.promu_opts }} - shell: bash - - uses: ./.github/actions/save_artifacts - with: - directory: .build diff --git a/.github/actions/check_proto/action.yml b/.github/actions/check_proto/action.yml deleted file mode 100644 index 9d9d773e6c..0000000000 --- a/.github/actions/check_proto/action.yml +++ /dev/null @@ -1,20 +0,0 @@ -name: Check proto files -inputs: - version: - type: string - description: Protoc version - default: "3.5.1" -runs: - using: composite - steps: - - run: | - env - set -x - curl -s -L https://github.com/protocolbuffers/protobuf/releases/download/v${{ inputs.version }}/protoc-${{ inputs.version }}-linux-x86_64.zip > /tmp/protoc.zip - unzip -d /tmp /tmp/protoc.zip - chmod +x /tmp/bin/protoc - export PATH=/tmp/bin:$PATH - make proto - shell: bash - - run: git diff --exit-code - shell: bash diff --git a/.github/actions/publish_images/action.yml b/.github/actions/publish_images/action.yml deleted file mode 100644 index bc17eb7d97..0000000000 --- a/.github/actions/publish_images/action.yml +++ /dev/null @@ -1,47 +0,0 @@ -name: Publish image -inputs: - registry: - type: string - description: Docker registry - organization: - type: string - description: Organization - login: - 
type: string - description: Username - password: - type: string - description: Password - dockerfile_path: - description: Path to Dockerfile - type: string - default: "" - dockerbuild_context: - description: Path to Dockerbuild context - type: string - default: "" - container_image_name: - description: Name of the container image - type: string - default: "" -runs: - using: composite - steps: - - if: inputs.dockerfile_path != '' - run: echo "export DOCKERFILE_PATH=${{ inputs.dockerfile_path }}" >> /tmp/tmp-profile - shell: bash - - if: inputs.container_image_name != '' - run: echo "export DOCKER_IMAGE_NAME=${{ inputs.container_image_name }}" >> /tmp/tmp-profile - shell: bash - - if: inputs.dockerbuild_context != '' - run: echo "export DOCKERBUILD_CONTEXT=${{ inputs.dockerbuild_context }}" >> /tmp/tmp-profile - shell: bash - - run: | - touch /tmp/tmp-profile - . /tmp/tmp-profile - make docker DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }} - docker images - echo ${{ inputs.password }} | docker login -u ${{ inputs.login }} --password-stdin ${{ inputs.registry }} - make docker-publish DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }} - make docker-manifest DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }} - shell: bash diff --git a/.github/actions/publish_main/action.yml b/.github/actions/publish_main/action.yml deleted file mode 100644 index 3ff45c20a3..0000000000 --- a/.github/actions/publish_main/action.yml +++ /dev/null @@ -1,43 +0,0 @@ -name: Publish image -inputs: - docker_hub_organization: - type: string - description: DockerHub organization - default: prom - docker_hub_login: - type: string - description: DockerHub username - docker_hub_password: - type: string - description: DockerHub password - quay_io_organization: - type: string - description: Quay.io organization - default: prometheus - quay_io_login: - type: string - description: Quay.io username - quay_io_password: - type: string - description: Quay.io password -runs: - using: composite - steps: - - uses: ./.github/actions/setup_environment - with: - enable_docker_multibuild: true - - uses: ./.github/actions/restore_artifacts - - uses: ./.github/actions/publish_images - if: inputs.docker_hub_organization != '' && inputs.docker_hub_login != '' - with: - registry: docker.io - organization: ${{ inputs.docker_hub_organization }} - login: ${{ inputs.docker_hub_login }} - password: ${{ inputs.docker_hub_password }} - - uses: ./.github/actions/publish_images - if: inputs.quay_io_organization != '' && inputs.quay_io_login != '' - with: - registry: quay.io - organization: ${{ inputs.quay_io_organization }} - login: ${{ inputs.quay_io_login }} - password: ${{ inputs.quay_io_password }} diff --git a/.github/actions/publish_release/action.yml b/.github/actions/publish_release/action.yml deleted file mode 100644 index 1b6a0e3021..0000000000 --- a/.github/actions/publish_release/action.yml +++ /dev/null @@ -1,54 +0,0 @@ -name: Publish image -inputs: - docker_hub_organization: - type: string - description: DockerHub organization - default: prom - docker_hub_login: - type: string - description: DockerHub username - docker_hub_password: - type: string - description: DockerHub password - quay_io_organization: - type: string - description: Quay.io organization - default: prometheus - quay_io_login: - type: string - description: Quay.io username - quay_io_password: - type: string - description: Quay.io password - github_token: - type: string - description: Github Token -runs: - using: composite - steps: - - 
uses: ./.github/actions/setup_environment - with: - enable_docker_multibuild: true - - uses: ./.github/actions/restore_artifacts - - run: ~/go/bin/promu crossbuild tarballs - shell: bash - - run: ~/go/bin/promu checksum .tarballs - shell: bash - - run: ~/go/bin/promu release .tarballs - shell: bash - env: - GITHUB_TOKEN: ${{ inputs.github_token }} - - uses: ./.github/actions/publish_release_images - if: inputs.docker_hub_organization != '' && inputs.docker_hub_login != '' - with: - registry: docker.io - organization: ${{ inputs.docker_hub_organization }} - login: ${{ inputs.docker_hub_login }} - password: ${{ inputs.docker_hub_password }} - - uses: ./.github/actions/publish_release_images - if: inputs.quay_io_organization != '' && inputs.quay_io_login != '' - with: - registry: quay.io - organization: ${{ inputs.quay_io_organization }} - login: ${{ inputs.quay_io_login }} - password: ${{ inputs.quay_io_password }} diff --git a/.github/actions/publish_release_images/action.yml b/.github/actions/publish_release_images/action.yml deleted file mode 100644 index 3b61328947..0000000000 --- a/.github/actions/publish_release_images/action.yml +++ /dev/null @@ -1,53 +0,0 @@ -name: Publish release image -inputs: - registry: - type: string - description: Docker registry - organization: - type: string - description: Organization - login: - type: string - description: Username - password: - type: string - description: Password - dockerfile_path: - description: Path to Dockerfile - type: string - default: "" - dockerbuild_context: - description: Path to Dockerbuild context - type: string - default: "" - container_image_name: - description: Name of the container image - type: string - default: "" -runs: - using: composite - steps: - - if: inputs.dockerfile_path != '' - run: echo "export DOCKERFILE_PATH=${{ inputs.dockerfile_path }}" >> /tmp/tmp-profile - shell: bash - - if: inputs.container_image_name != '' - run: echo "export DOCKER_IMAGE_NAME=${{ inputs.container_image_name }}" >> /tmp/tmp-profile - shell: bash - - if: inputs.dockerbuild_context != '' - run: echo "export DOCKERBUILD_CONTEXT=${{ inputs.dockerbuild_context }}" >> /tmp/tmp-profile - shell: bash - - run: | - current_tag=${GITHUB_REF#refs/*/} - touch /tmp/tmp-profile - . /tmp/tmp-profile - make docker DOCKER_IMAGE_TAG="$current_tag" DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }} - docker images - echo ${{ inputs.password }} | docker login -u ${{ inputs.login }} --password-stdin ${{ inputs.registry }} - make docker-publish DOCKER_IMAGE_TAG="$current_tag" DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }} - make docker-manifest DOCKER_IMAGE_TAG="$current_tag" DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }} - if [[ "$current_tag" =~ ^v[0-9]+(\.[0-9]+){2}$ ]]; then - make docker-tag-latest DOCKER_IMAGE_TAG="$current_tag" DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }} - make docker-publish DOCKER_IMAGE_TAG="latest" DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }} - make docker-manifest DOCKER_IMAGE_TAG="latest" DOCKER_REPO=${{ inputs.registry }}/${{ inputs.organization }} - fi - shell: bash diff --git a/.github/actions/restore_artifacts/action.yml b/.github/actions/restore_artifacts/action.yml deleted file mode 100644 index a2d1b625e5..0000000000 --- a/.github/actions/restore_artifacts/action.yml +++ /dev/null @@ -1,19 +0,0 @@ -# Restore artifacts created by save_artifacts. -# Tar is used because the default actions do not preserve directory structure -# and file mode. 
-name: Restore artifacts -runs: - using: composite - steps: - - name: Download all workflow run artifacts - uses: actions/download-artifact@v3 - with: - name: artifact - path: .artifacts - - run: | - for tar in .artifacts/*.tar - do - tar xvf $tar - done - rm -v .artifacts/*.tar - shell: bash diff --git a/.github/actions/save_artifacts/action.yml b/.github/actions/save_artifacts/action.yml deleted file mode 100644 index e0b0440d01..0000000000 --- a/.github/actions/save_artifacts/action.yml +++ /dev/null @@ -1,17 +0,0 @@ -# Tar is used because the default actions do not preserve directory structure -# and file mode. -name: Save artifacts -inputs: - directory: - type: string - description: Path of the directory to save -runs: - using: composite - steps: - - run: | - tar cvf artifact.tar ${{ inputs.directory }} - mv artifact.tar artifact-$(sha1sum artifact.tar|awk '{ print $1 }').tar - shell: bash - - uses: actions/upload-artifact@v3 - with: - path: artifact-*.tar diff --git a/.github/actions/setup_environment/action.yml b/.github/actions/setup_environment/action.yml deleted file mode 100644 index 8c1d4d58ef..0000000000 --- a/.github/actions/setup_environment/action.yml +++ /dev/null @@ -1,43 +0,0 @@ -name: Setup environment -inputs: - enable_go: - type: boolean - description: Whether to enable go specific features, such as caching. - default: true - enable_npm: - type: boolean - description: Whether to enable npm specific features, such as caching. - default: false - enable_docker_multibuild: - type: boolean - description: Whether to enable multibuild docker - default: false -runs: - using: composite - steps: - - uses: actions/cache@v3 - if: inputs.enable_go == 'true' - with: - path: | - ~/.cache/go-build - ~/go/pkg/mod - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - uses: actions/cache@v3 - if: inputs.enable_npm == 'true' - with: - path: | - ~/.npm - key: ${{ runner.os }}-npm-${{ hashFiles('web/ui/package-lock.json') }} - restore-keys: | - ${{ runner.os }}-npm- - - run: make promu - shell: bash - if: inputs.enable_go == 'true' - - name: Set up QEMU - uses: docker/setup-qemu-action@v1 - if: inputs.enable_docker_multibuild == 'true' - - name: Set up buildx - uses: docker/setup-buildx-action@v1 - if: inputs.enable_docker_multibuild == 'true' diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index 8c2d52a98d..1b05a28693 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -1,5 +1,5 @@ name: buf.build -on: # yamllint disable-line rule:truthy +on: pull_request: paths: - ".github/workflows/buf-lint.yml" @@ -10,7 +10,9 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.7.0 + - uses: bufbuild/buf-setup-action@v1.23.1 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@v1 with: input: 'prompb' diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index d874825678..3a8d1d0402 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -1,5 +1,5 @@ name: buf.build -on: # yamllint disable-line rule:truthy +on: push: branches: - main @@ -7,9 +7,12 @@ jobs: buf: name: lint and publish runs-on: ubuntu-latest + if: github.repository_owner == 'prometheus' steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.7.0 + - uses: bufbuild/buf-setup-action@v1.23.1 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@v1 with: input: 'prompb' 
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9ad7bb8c8c..e8b61bcadb 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,8 +1,9 @@ --- name: CI -on: # yamllint disable-line rule:truthy +on: pull_request: push: + jobs: test_go: name: Go tests @@ -10,15 +11,18 @@ jobs: # Whenever the Go version is updated here, .promu.yml # should also be updated. container: - image: quay.io/prometheus/golang-builder:1.19-base + image: quay.io/prometheus/golang-builder:1.20-base steps: - uses: actions/checkout@v3 - - uses: ./.github/actions/setup_environment + - uses: prometheus/promci@v0.1.0 + - uses: ./.github/promci/actions/setup_environment - run: make GO_ONLY=1 SKIP_GOLANGCI_LINT=1 - run: go test ./tsdb/ -test.tsdb-isolation=false + - run: go test --tags=stringlabels ./... + - run: GOARCH=386 go test ./cmd/prometheus - run: make -C documentation/examples/remote_storage - run: make -C documentation/examples - - uses: ./.github/actions/check_proto + - uses: ./.github/promci/actions/check_proto with: version: "3.15.8" @@ -28,18 +32,19 @@ jobs: # Whenever the Go version is updated here, .promu.yml # should also be updated. container: - image: quay.io/prometheus/golang-builder:1.19-base + image: quay.io/prometheus/golang-builder:1.20-base steps: - uses: actions/checkout@v3 - - uses: ./.github/actions/setup_environment + - uses: prometheus/promci@v0.1.0 + - uses: ./.github/promci/actions/setup_environment with: enable_go: false enable_npm: true - run: make assets-tarball - run: make ui-lint - run: make ui-test - - uses: ./.github/actions/save_artifacts + - uses: ./.github/promci/actions/save_artifacts with: directory: .tarballs @@ -48,9 +53,9 @@ jobs: runs-on: windows-latest steps: - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v4 with: - go-version: '>=1.19 <1.20' + go-version: '>=1.20 <1.21' - run: | $TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/discovery.*|github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"} go test $TestTargets -vet=off -v @@ -61,7 +66,7 @@ jobs: runs-on: ubuntu-latest # The go verson in this image should be N-1 wrt test_go. container: - image: quay.io/prometheus/golang-builder:1.18-base + image: quay.io/prometheus/golang-builder:1.19-base steps: - uses: actions/checkout@v3 - run: make build @@ -100,7 +105,8 @@ jobs: thread: [ 0, 1, 2 ] steps: - uses: actions/checkout@v3 - - uses: ./.github/actions/build + - uses: prometheus/promci@v0.1.0 + - uses: ./.github/promci/actions/build with: promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386" parallelism: 3 @@ -122,7 +128,8 @@ jobs: # should also be updated. 
steps: - uses: actions/checkout@v3 - - uses: ./.github/actions/build + - uses: prometheus/promci@v0.1.0 + - uses: ./.github/promci/actions/build with: parallelism: 12 thread: ${{ matrix.thread }} @@ -133,16 +140,17 @@ jobs: - name: Checkout repository uses: actions/checkout@v3 - name: Install Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: '<1.19' + go-version: 1.20.x - name: Install snmp_exporter/generator dependencies run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' - name: Lint - uses: golangci/golangci-lint-action@v3.2.0 + uses: golangci/golangci-lint-action@v3.6.0 with: - version: v1.49.0 + args: --verbose + version: v1.53.3 fuzzing: uses: ./.github/workflows/fuzzing.yml if: github.event_name == 'pull_request' @@ -156,7 +164,8 @@ jobs: if: github.event_name == 'push' && github.event.ref == 'refs/heads/main' steps: - uses: actions/checkout@v3 - - uses: ./.github/actions/publish_main + - uses: prometheus/promci@v0.1.0 + - uses: ./.github/promci/actions/publish_main with: docker_hub_login: ${{ secrets.docker_hub_login }} docker_hub_password: ${{ secrets.docker_hub_password }} @@ -169,7 +178,8 @@ jobs: if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.') steps: - uses: actions/checkout@v3 - - uses: ./.github/actions/publish_release + - uses: prometheus/promci@v0.1.0 + - uses: ./.github/promci/actions/publish_release with: docker_hub_login: ${{ secrets.docker_hub_login }} docker_hub_password: ${{ secrets.docker_hub_password }} @@ -183,12 +193,13 @@ jobs: steps: - name: Checkout uses: actions/checkout@v3 + - uses: prometheus/promci@v0.1.0 - name: Install nodejs uses: actions/setup-node@v3 with: node-version-file: "web/ui/.nvmrc" registry-url: "https://registry.npmjs.org" - - uses: actions/cache@v3.0.9 + - uses: actions/cache@v3.3.1 with: path: ~/.npm key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 1a04e1cb05..6036e80ae9 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -1,7 +1,7 @@ --- name: "CodeQL" -on: # yamllint disable-line rule:truthy +on: workflow_call: schedule: - cron: "26 14 * * 1" @@ -21,9 +21,9 @@ jobs: steps: - name: Checkout repository uses: actions/checkout@v3 - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v4 with: - go-version: '>=1.19 <1.20' + go-version: '>=1.20 <1.21' - name: Initialize CodeQL uses: github/codeql-action/init@v2 diff --git a/.github/workflows/funcbench.yml b/.github/workflows/funcbench.yml index 7a6654c69a..c7245c60ad 100644 --- a/.github/workflows/funcbench.yml +++ b/.github/workflows/funcbench.yml @@ -1,4 +1,4 @@ -on: # yamllint disable-line rule:truthy +on: repository_dispatch: types: [funcbench_start] name: Funcbench Workflow diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml index 11e86a39ea..87c40d3105 100644 --- a/.github/workflows/fuzzing.yml +++ b/.github/workflows/fuzzing.yml @@ -1,5 +1,5 @@ name: CIFuzz -on: # yamllint disable-line rule:truthy +on: workflow_call: jobs: Fuzzing: diff --git a/.github/workflows/lock.yml b/.github/workflows/lock.yml index 88960bb5e6..c6ac3d74ef 100644 --- a/.github/workflows/lock.yml +++ b/.github/workflows/lock.yml @@ -1,6 +1,6 @@ name: 'Lock Threads' -on: # yamllint disable-line rule:truthy +on: schedule: - cron: '13 23 * * *' workflow_dispatch: @@ -14,8 +14,9 @@ concurrency: jobs: action: runs-on: 
ubuntu-latest + if: github.repository_owner == 'prometheus' steps: - - uses: dessant/lock-threads@v3 + - uses: dessant/lock-threads@v4 with: process-only: 'issues' issue-inactive-days: '180' diff --git a/.github/workflows/prombench.yml b/.github/workflows/prombench.yml index d9735ceec2..6ee172662b 100644 --- a/.github/workflows/prombench.yml +++ b/.github/workflows/prombench.yml @@ -1,4 +1,4 @@ -on: # yamllint disable-line rule:truthy +on: repository_dispatch: types: [prombench_start, prombench_restart, prombench_stop] name: Prombench Workflow diff --git a/.github/workflows/repo_sync.yml b/.github/workflows/repo_sync.yml index 4cf2ee335a..9526cd2fec 100644 --- a/.github/workflows/repo_sync.yml +++ b/.github/workflows/repo_sync.yml @@ -1,11 +1,12 @@ --- name: Sync repo files -on: # yamllint disable-line rule:truthy +on: schedule: - cron: '44 17 * * *' jobs: repo_sync: runs-on: ubuntu-latest + if: github.repository_owner == 'prometheus' container: image: quay.io/prometheus/golang-builder steps: diff --git a/.gitignore b/.gitignore index a63f499515..e85d766b09 100644 --- a/.gitignore +++ b/.gitignore @@ -19,7 +19,7 @@ benchmark.txt !/.promu.yml !/.golangci.yml /documentation/examples/remote_storage/remote_storage_adapter/remote_storage_adapter -/documentation/examples/remote_storage/example_write_adapter/example_writer_adapter +/documentation/examples/remote_storage/example_write_adapter/example_write_adapter npm_licenses.tar.bz2 /web/ui/static/react @@ -28,3 +28,6 @@ npm_licenses.tar.bz2 /.build /**/node_modules + +# Ignore parser debug +y.output diff --git a/.golangci.yml b/.golangci.yml index 81790e6e3a..4a6daae594 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,5 +1,5 @@ run: - deadline: 5m + timeout: 15m skip-files: # Skip autogenerated files. - ^.*\.(pb|y)\.go$ @@ -10,31 +10,60 @@ output: linters: enable: - depguard + - gocritic - gofumpt - goimports - - revive - misspell + - predeclared + - revive + - unconvert + - unused issues: max-same-issues: 0 exclude-rules: + - linters: + - gocritic + text: "appendAssign" - path: _test.go linters: - errcheck linters-settings: depguard: - list-type: blacklist - include-go-root: true - packages-with-error-message: - - sync/atomic: "Use go.uber.org/atomic instead of sync/atomic" - - github.com/stretchr/testify/assert: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert" - - github.com/go-kit/kit/log: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log" - - io/ioutil: "Use corresponding 'os' or 'io' functions instead." - - regexp: "Use github.com/grafana/regexp instead of regexp" + rules: + main: + deny: + - pkg: "sync/atomic" + desc: "Use go.uber.org/atomic instead of sync/atomic" + - pkg: "github.com/stretchr/testify/assert" + desc: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert" + - pkg: "github.com/go-kit/kit/log" + desc: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log" + - pkg: "io/ioutil" + desc: "Use corresponding 'os' or 'io' functions instead." + - pkg: "regexp" + desc: "Use github.com/grafana/regexp instead of regexp" errcheck: - exclude: scripts/errcheck_excludes.txt + exclude-functions: + # Don't flag lines such as "io.Copy(io.Discard, resp.Body)". + - io.Copy + # The next two are used in HTTP handlers, any error is handled by the server itself. + - io.WriteString + - (net/http.ResponseWriter).Write + # No need to check for errors on server's shutdown. + - (*net/http.Server).Shutdown + # Never check for logger errors. 
+ - (github.com/go-kit/log.Logger).Log + # Never check for rollback errors as Rollback() is called when a previous error was detected. + - (github.com/prometheus/prometheus/storage.Appender).Rollback goimports: local-prefixes: github.com/prometheus/prometheus gofumpt: extra-rules: true + revive: + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter + - name: unused-parameter + severity: warning + disabled: true diff --git a/.promu.yml b/.promu.yml index 50c0be2575..f724dc34f3 100644 --- a/.promu.yml +++ b/.promu.yml @@ -1,7 +1,7 @@ go: # Whenever the Go version is updated here, # .circle/config.yml should also be updated. - version: 1.19 + version: 1.20 repository: path: github.com/prometheus/prometheus build: @@ -10,7 +10,15 @@ build: path: ./cmd/prometheus - name: promtool path: ./cmd/promtool - flags: -a -tags netgo,builtinassets + tags: + all: + - netgo + - builtinassets + - stringlabels + windows: + - builtinassets + - stringlabels + flags: -a ldflags: | -X github.com/prometheus/common/version.Version={{.Version}} -X github.com/prometheus/common/version.Revision={{.Revision}} diff --git a/.yamllint b/.yamllint index 3878a31d30..955a5a6270 100644 --- a/.yamllint +++ b/.yamllint @@ -20,9 +20,4 @@ rules: config/testdata/section_key_dup.bad.yml line-length: disable truthy: - ignore: | - .github/workflows/codeql-analysis.yml - .github/workflows/funcbench.yml - .github/workflows/fuzzing.yml - .github/workflows/prombench.yml - .github/workflows/golangci-lint.yml + check-keys: false diff --git a/CHANGELOG.md b/CHANGELOG.md index c5e858b542..d316e84d37 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,173 @@ # Changelog + +## 2.45.0 / 2023-06-23 + +This release is a LTS (Long-Term Support) release of Prometheus and will +receive security, documentation and bugfix patches for at least 12 months. +Please read more about our LTS release cycle at +. + +* [FEATURE] API: New limit parameter to limit the number of items returned by `/api/v1/status/tsdb` endpoint. #12336 +* [FEATURE] Config: Add limits to global config. #12126 +* [FEATURE] Consul SD: Added support for `path_prefix`. #12372 +* [FEATURE] Native histograms: Add option to scrape both classic and native histograms. #12350 +* [FEATURE] Native histograms: Added support for two more arithmetic operators `avg_over_time` and `sum_over_time`. #12262 +* [FEATURE] Promtool: When providing the block id, only one block will be loaded and analyzed. #12031 +* [FEATURE] Remote-write: New Azure ad configuration to support remote writing directly to Azure Monitor workspace. #11944 +* [FEATURE] TSDB: Samples per chunk are now configurable with flag `storage.tsdb.samples-per-chunk`. By default set to its former value 120. #12055 +* [ENHANCEMENT] Native histograms: bucket size can now be limited to avoid scrape fails. #12254 +* [ENHANCEMENT] TSDB: Dropped series are now deleted from the WAL sooner. #12297 +* [BUGFIX] Native histograms: ChunkSeries iterator now checks if a new sample can be appended to the open chunk. #12185 +* [BUGFIX] Native histograms: Fix Histogram Appender `Appendable()` segfault. #12357 +* [BUGFIX] Native histograms: Fix setting reset header to gauge histograms in seriesToChunkEncoder. #12329 +* [BUGFIX] TSDB: Tombstone intervals are not modified after Get() call. #12245 +* [BUGFIX] TSDB: Use path/filepath to set the WAL directory. 
#12349 + +## 2.44.0 / 2023-05-13 + +This version is built with Go tag `stringlabels`, to use the smaller data +structure for Labels that was optional in the previous release. For more +details about this code change see #10991. + +* [CHANGE] Remote-write: Raise default samples per send to 2,000. #12203 +* [FEATURE] Remote-read: Handle native histograms. #12085, #12192 +* [FEATURE] Promtool: Health and readiness check of prometheus server in CLI. #12096 +* [FEATURE] PromQL: Add `query_samples_total` metric, the total number of samples loaded by all queries. #12251 +* [ENHANCEMENT] Storage: Optimise buffer used to iterate through samples. #12326 +* [ENHANCEMENT] Scrape: Reduce memory allocations on target labels. #12084 +* [ENHANCEMENT] PromQL: Use faster heap method for `topk()` / `bottomk()`. #12190 +* [ENHANCEMENT] Rules API: Allow filtering by rule name. #12270 +* [ENHANCEMENT] Native Histograms: Various fixes and improvements. #11687, #12264, #12272 +* [ENHANCEMENT] UI: Search of scraping pools is now case-insensitive. #12207 +* [ENHANCEMENT] TSDB: Add an affirmative log message for successful WAL repair. #12135 +* [BUGFIX] TSDB: Block compaction failed when shutting down. #12179 +* [BUGFIX] TSDB: Out-of-order chunks could be ignored if the write-behind log was deleted. #12127 + +## 2.43.1 / 2023-05-03 + +* [BUGFIX] Labels: `Set()` after `Del()` would be ignored, which broke some relabeling rules. #12322 + +## 2.43.0 / 2023-03-21 + +We are working on some performance improvements in Prometheus, which are only +built into Prometheus when compiling it using the Go tag `stringlabels` +(therefore they are not shipped in the default binaries). It uses a data +structure for labels that uses a single string to hold all the label/values, +resulting in a smaller heap size and some speedups in most cases. We would like +to encourage users who are interested in these improvements to help us measure +the gains on their production architecture. We are providing release artefacts +`2.43.0+stringlabels` and Docker images tagged `v2.43.0-stringlabels` with those +improvements for testing. #10991 + +* [FEATURE] Promtool: Add HTTP client configuration to query commands. #11487 +* [FEATURE] Scrape: Add `scrape_config_files` to include scrape configs from different files. #12019 +* [FEATURE] HTTP client: Add `no_proxy` to exclude URLs from proxied requests. #12098 +* [FEATURE] HTTP client: Add `proxy_from_environment` to read proxies from env variables. #12098 +* [ENHANCEMENT] API: Add support for setting lookback delta per query via the API. #12088 +* [ENHANCEMENT] API: Change HTTP status code from 503/422 to 499 if a request is canceled. #11897 +* [ENHANCEMENT] Scrape: Allow exemplars for all metric types. #11984 +* [ENHANCEMENT] TSDB: Add metrics for head chunks and WAL folders size. #12013 +* [ENHANCEMENT] TSDB: Automatically remove incorrect snapshot with index that is ahead of WAL. #11859 +* [ENHANCEMENT] TSDB: Improve Prometheus parser error outputs to be more comprehensible. #11682 +* [ENHANCEMENT] UI: Scope `group by` labels to metric in autocompletion. #11914 +* [BUGFIX] Scrape: Fix `prometheus_target_scrape_pool_target_limit` metric not set before reloading. #12002 +* [BUGFIX] TSDB: Correctly update `prometheus_tsdb_head_chunks_removed_total` and `prometheus_tsdb_head_chunks` metrics when reading WAL. #11858 +* [BUGFIX] TSDB: Use the correct unit (seconds) when recording out-of-order append deltas in the `prometheus_tsdb_sample_ooo_delta` metric. 
#12004 + +## 2.42.0 / 2023-01-31 + +This release comes with a bunch of feature coverage for native histograms and breaking changes. + +If you are trying native histograms already, we recommend you remove the `wal` directory when upgrading. +Because the old WAL record for native histograms is not backward compatible in v2.42.0, this will lead to some data loss for the latest data. + +Additionally, if you scrape "float histograms" or use recording rules on native histograms in v2.42.0 (which writes float histograms), +it is a one-way street since older versions do not support float histograms. + +* [CHANGE] **breaking** TSDB: Changed WAL record format for the experimental native histograms. #11783 +* [FEATURE] Add 'keep_firing_for' field to alerting rules. #11827 +* [FEATURE] Promtool: Add support of selecting timeseries for TSDB dump. #11872 +* [ENHANCEMENT] Agent: Native histogram support. #11842 +* [ENHANCEMENT] Rules: Support native histograms in recording rules. #11838 +* [ENHANCEMENT] SD: Add container ID as a meta label for pod targets for Kubernetes. #11844 +* [ENHANCEMENT] SD: Add VM size label to azure service discovery. #11650 +* [ENHANCEMENT] Support native histograms in federation. #11830 +* [ENHANCEMENT] TSDB: Add gauge histogram support. #11783 #11840 #11814 +* [ENHANCEMENT] TSDB/Scrape: Support FloatHistogram that represents buckets as float64 values. #11522 #11817 #11716 +* [ENHANCEMENT] UI: Show individual scrape pools on /targets page. #11142 + +## 2.41.0 / 2022-12-20 + +* [FEATURE] Relabeling: Add `keepequal` and `dropequal` relabel actions. #11564 +* [FEATURE] Add support for HTTP proxy headers. #11712 +* [ENHANCEMENT] Reload private certificates when changed on disk. #11685 +* [ENHANCEMENT] Add `max_version` to specify maximum TLS version in `tls_config`. #11685 +* [ENHANCEMENT] Add `goos` and `goarch` labels to `prometheus_build_info`. #11685 +* [ENHANCEMENT] SD: Add proxy support for EC2 and LightSail SDs #11611 +* [ENHANCEMENT] SD: Add new metric `prometheus_sd_file_watcher_errors_total`. #11066 +* [ENHANCEMENT] Remote Read: Use a pool to speed up marshalling. #11357 +* [ENHANCEMENT] TSDB: Improve handling of tombstoned chunks in iterators. #11632 +* [ENHANCEMENT] TSDB: Optimize postings offset table reading. #11535 +* [BUGFIX] Scrape: Validate the metric name, label names, and label values after relabeling. #11074 +* [BUGFIX] Remote Write receiver and rule manager: Fix error handling. #11727 + +## 2.40.7 / 2022-12-14 + +* [BUGFIX] Use Windows native DNS resolver. #11704 +* [BUGFIX] TSDB: Fix queries involving negative buckets of native histograms. #11699 + +## 2.40.6 / 2022-12-09 + +* [SECURITY] Security upgrade from go and upstream dependencies that include + security fixes to the net/http and os packages. #11691 + +## 2.40.5 / 2022-12-01 + +* [BUGFIX] TSDB: Fix queries involving native histograms due to improper reset of iterators. #11643 + +## 2.40.4 / 2022-11-29 + +* [SECURITY] Fix basic authentication bypass vulnerability (CVE-2022-46146). GHSA-4v48-4q5m-8vx4 + +## 2.40.3 / 2022-11-23 + +* [BUGFIX] TSDB: Fix compaction after a deletion is called. #11623 + +## 2.40.2 / 2022-11-16 + +* [BUGFIX] UI: Fix black-on-black metric name color in dark mode. #11572 + +## 2.40.1 / 2022-11-09 + +* [BUGFIX] TSDB: Fix alignment for atomic int64 for 32 bit architecture. #11547 +* [BUGFIX] Scrape: Fix accept headers. #11552 + +## 2.40.0 / 2022-11-08 + +This release introduces an experimental, native way of representing and storing histograms. 
+ +It can be enabled in Prometheus via `--enable-feature=native-histograms` to accept native histograms. +Enabling native histograms will also switch the preferred exposition format to protobuf. + +To instrument your application with native histograms, use the `main` branch of `client_golang` (this will change for the final release when v1.14.0 of client_golang will be out), and set the `NativeHistogramBucketFactor` in your `HistogramOpts` (`1.1` is a good starting point). +Your existing histograms won't switch to native histograms until `NativeHistogramBucketFactor` is set. + +* [FEATURE] Add **experimental** support for native histograms. Enable with the flag `--enable-feature=native-histograms`. #11447 +* [FEATURE] SD: Add service discovery for OVHcloud. #10802 +* [ENHANCEMENT] Kubernetes SD: Use protobuf encoding. #11353 +* [ENHANCEMENT] TSDB: Use golang.org/x/exp/slices for improved sorting speed. #11054 #11318 #11380 +* [ENHANCEMENT] Consul SD: Add enterprise admin partitions. Adds `__meta_consul_partition` label. Adds `partition` config in `consul_sd_config`. #11482 +* [BUGFIX] API: Fix API error codes for `/api/v1/labels` and `/api/v1/series`. #11356 + +## 2.39.2 / 2022-11-09 + +* [BUGFIX] TSDB: Fix alignment for atomic int64 for 32 bit architecture. #11547 + +## 2.39.1 / 2022-10-07 + +* [BUGFIX] Rules: Fix notifier relabel changing the labels on active alerts. #11427 + ## 2.39.0 / 2022-10-05 * [FEATURE] **experimental** TSDB: Add support for ingesting out-of-order samples. This is configured via `out_of_order_time_window` field in the config file; check config file docs for more info. #11075 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 844f8e09fc..57055ef38c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -64,10 +64,10 @@ To add or update a new dependency, use the `go get` command: ```bash # Pick the latest tagged release. -go install example.com/some/module/pkg@latest +go get example.com/some/module/pkg@latest # Pick a specific version. -go install example.com/some/module/pkg@vX.Y.Z +go get example.com/some/module/pkg@vX.Y.Z ``` Tidy up the `go.mod` and `go.sum` files: @@ -78,3 +78,20 @@ GO111MODULE=on go mod tidy ``` You have to commit the changes to `go.mod` and `go.sum` before submitting the pull request. + +## Working with the PromQL parser + +The PromQL parser grammar is located in `promql/parser/generated_parser.y` and it can be built using `make parser`. +The parser is built using [goyacc](https://pkg.go.dev/golang.org/x/tools/cmd/goyacc) + +If doing some sort of debugging, then it is possible to add some verbose output. After generating the parser, then you +can modify the `./promql/parser/generated_parser.y.go` manually. + +```golang +// As of writing this was somewhere around line 600. +var ( + yyDebug = 0 // This can be be a number 0 -> 5. + yyErrorVerbose = false // This can be set to true. 
+) + +``` diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 103ebdda64..1175bb9a6f 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -10,7 +10,7 @@ Julien Pivotto ( / @roidelapluie) and Levi Harrison * `prometheus-mixin`: Björn Rabenstein ( / @beorn7) * `storage` * `remote`: Chris Marchbanks ( / @csmarchbanks), Callum Styan ( / @cstyan), Bartłomiej Płotka ( / @bwplotka), Tom Wilkie ( / @tomwilkie) -* `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka) +* `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka), Jesús Vázquez ( / @jesusvazquez) * `agent`: Robert Fratto ( / @rfratto) * `web` * `ui`: Julius Volz ( / @juliusv) diff --git a/Makefile b/Makefile index c4a9a06cfa..0dd8673af3 100644 --- a/Makefile +++ b/Makefile @@ -78,6 +78,17 @@ assets-tarball: assets @echo '>> packaging assets' scripts/package_assets.sh +# We only want to generate the parser when there's changes to the grammar. +.PHONY: parser +parser: + @echo ">> running goyacc to generate the .go file." +ifeq (, $(shell command -v goyacc > /dev/null)) + @echo "goyacc not installed so skipping" + @echo "To install: go install golang.org/x/tools/cmd/goyacc@v0.6.0" +else + goyacc -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y +endif + .PHONY: test # If we only want to only test go code we have to change the test target # which is called by all. @@ -90,8 +101,10 @@ endif .PHONY: npm_licenses npm_licenses: ui-install @echo ">> bundling npm licenses" - rm -f $(REACT_APP_NPM_LICENSES_TARBALL) - find $(UI_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --transform 's/^/npm_licenses\//' --files-from=- + rm -f $(REACT_APP_NPM_LICENSES_TARBALL) npm_licenses + ln -s . npm_licenses + find npm_licenses/$(UI_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --files-from=- + rm -f npm_licenses .PHONY: tarball tarball: npm_licenses common-tarball @@ -107,7 +120,7 @@ plugins/plugins.go: plugins.yml plugins/generate.go plugins: plugins/plugins.go .PHONY: build -build: assets npm_licenses assets-compress common-build plugins +build: assets npm_licenses assets-compress plugins common-build .PHONY: bench_tsdb bench_tsdb: $(PROMU) @@ -120,3 +133,8 @@ bench_tsdb: $(PROMU) @$(GO) tool pprof --alloc_space -svg $(PROMTOOL) $(TSDB_BENCHMARK_OUTPUT_DIR)/mem.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/memprof.alloc.svg @$(GO) tool pprof -svg $(PROMTOOL) $(TSDB_BENCHMARK_OUTPUT_DIR)/block.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/blockprof.svg @$(GO) tool pprof -svg $(PROMTOOL) $(TSDB_BENCHMARK_OUTPUT_DIR)/mutex.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/mutexprof.svg + +.PHONY: cli-documentation +cli-documentation: + $(GO) run ./cmd/prometheus/ --write-documentation > docs/command-line/prometheus.md + $(GO) run ./cmd/promtool/ write-documentation > docs/command-line/promtool.md diff --git a/Makefile.common b/Makefile.common index 7642c4485c..0ce7ea4612 100644 --- a/Makefile.common +++ b/Makefile.common @@ -49,19 +49,19 @@ endif GOTEST := $(GO) test GOTEST_DIR := ifneq ($(CIRCLE_JOB),) -ifneq ($(shell which gotestsum),) +ifneq ($(shell command -v gotestsum > /dev/null),) GOTEST_DIR := test-results GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- endif endif -PROMU_VERSION ?= 0.13.0 +PROMU_VERSION ?= 0.15.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.49.0 
+GOLANGCI_LINT_VERSION ?= v1.53.3 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) @@ -91,6 +91,8 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) +SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG)) + ifeq ($(GOHOSTARCH),amd64) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) # Only supported on amd64 @@ -176,7 +178,7 @@ endif .PHONY: common-yamllint common-yamllint: @echo ">> running yamllint on all YAML files in the repository" -ifeq (, $(shell which yamllint)) +ifeq (, $(shell command -v yamllint > /dev/null)) @echo "yamllint not installed so skipping" else yamllint . @@ -205,7 +207,7 @@ common-tarball: promu .PHONY: common-docker $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS) $(BUILD_DOCKER_ARCHS): common-docker-%: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \ -f $(DOCKERFILE_PATH) \ --build-arg ARCH="$*" \ --build-arg OS="linux" \ @@ -214,19 +216,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%: .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) common-docker-publish: $(PUBLISH_DOCKER_ARCHS) $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) common-docker-tag-latest: $(TAG_DOCKER_ARCHS) $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" .PHONY: common-docker-manifest common-docker-manifest: - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)) + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" .PHONY: promu promu: $(PROMU) diff --git a/README.md b/README.md index 6ca98143cb..8b89bb01e5 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,13 @@ -# Prometheus +

+<h1 align="center" style="border-bottom: none">
+    <a href="https://prometheus.io" target="_blank">
+        <img alt="Prometheus" src="/documentation/images/prometheus-logo.svg">
+    </a><br>Prometheus
+</h1>
 
-[![CircleCI](https://circleci.com/gh/prometheus/prometheus/tree/main.svg?style=shield)][circleci]
+<p align="center">Visit <a href="https://prometheus.io" target="_blank">prometheus.io</a> for the full documentation,
+examples and guides.</p>
+
+<div align="center">
+
+[![CI](https://github.com/prometheus/prometheus/actions/workflows/ci.yml/badge.svg)](https://github.com/prometheus/prometheus/actions/workflows/ci.yml)
[![Docker Repository on Quay](https://quay.io/repository/prometheus/prometheus/status)][quay]
[![Docker Pulls](https://img.shields.io/docker/pulls/prom/prometheus.svg?maxAge=604800)][hub]
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/prometheus)](https://goreportcard.com/report/github.com/prometheus/prometheus)
@@ -8,8 +15,7 @@
[![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/prometheus/prometheus)
[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/prometheus.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:prometheus)
-Visit [prometheus.io](https://prometheus.io) for the full documentation,
-examples and guides.
+</div>
Prometheus, a [Cloud Native Computing Foundation](https://cncf.io/) project, is a systems and service monitoring system. It collects metrics from configured targets at given intervals, evaluates rule expressions, @@ -28,7 +34,7 @@ The features that distinguish Prometheus from other metrics and monitoring syste ## Architecture overview -![Architecture overview](https://cdn.jsdelivr.net/gh/prometheus/prometheus@c34257d069c630685da35bcef084632ffd5d6209/documentation/images/architecture.svg) +![Architecture overview](documentation/images/architecture.svg) ## Install @@ -132,8 +138,6 @@ make npm_licenses make common-docker-amd64 ``` -*NB* if you are on a Mac, you will need [gnu-tar](https://formulae.brew.sh/formula/gnu-tar). - ## Using Prometheus as a Go Library ### Remote Write @@ -172,7 +176,6 @@ For more information on building, running, and developing on the React-based UI, ## More information * Godoc documentation is available via [pkg.go.dev](https://pkg.go.dev/github.com/prometheus/prometheus). Due to peculiarities of Go Modules, v2.x.y will be displayed as v0.x.y. -* You will find a CircleCI configuration in [`.circleci/config.yml`](.circleci/config.yml). * See the [Community page](https://prometheus.io/community) for how to reach the Prometheus developers and users on various communication channels. ## Contributing @@ -184,5 +187,4 @@ Refer to [CONTRIBUTING.md](https://github.com/prometheus/prometheus/blob/main/CO Apache License 2.0, see [LICENSE](https://github.com/prometheus/prometheus/blob/main/LICENSE). [hub]: https://hub.docker.com/r/prom/prometheus/ -[circleci]: https://circleci.com/gh/prometheus/prometheus [quay]: https://quay.io/repository/prometheus/prometheus diff --git a/RELEASE.md b/RELEASE.md index 5cf09e58e2..0d0918191b 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -44,8 +44,15 @@ Release cadence of first pre-releases being cut is 6 weeks. | v2.37 LTS | 2022-06-29 | Julien Pivotto (GitHub: @roidelapluie) | | v2.38 | 2022-08-10 | Julius Volz (GitHub: @juliusv) | | v2.39 | 2022-09-21 | Ganesh Vernekar (GitHub: @codesome) | -| v2.40 | 2022-11-02 | **searching for volunteer** | -| v2.41 | 2022-12-14 | **searching for volunteer** | +| v2.40 | 2022-11-02 | Ganesh Vernekar (GitHub: @codesome) | +| v2.41 | 2022-12-14 | Julien Pivotto (GitHub: @roidelapluie) | +| v2.42 | 2023-01-25 | Kemal Akkoyun (GitHub: @kakkoyun) | +| v2.43 | 2023-03-08 | Julien Pivotto (GitHub: @roidelapluie) | +| v2.44 | 2023-04-19 | Bryan Boreham (GitHub: @bboreham) | +| v2.45 LTS | 2023-05-31 | Jesus Vazquez (Github: @jesusvazquez) | +| v2.46 | 2023-07-12 | Julien Pivotto (GitHub: @roidelapluie) | +| v2.47 | 2023-08-23 | **searching for volunteer** | +| v2.48 | 2023-10-04 | **searching for volunteer** | If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice. diff --git a/VERSION b/VERSION index cde8adf34d..e599014eab 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.39.0 +2.45.0 diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 65fcf15238..debc0d3f9d 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -12,6 +12,7 @@ // limitations under the License. // The main package for the Prometheus server executable. +// nolint:revive // Many unsued function arguments in this file by design. 
package main import ( @@ -33,6 +34,7 @@ import ( "syscall" "time" + "github.com/alecthomas/kingpin/v2" "github.com/alecthomas/units" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -45,10 +47,8 @@ import ( promlogflag "github.com/prometheus/common/promlog/flag" "github.com/prometheus/common/version" toolkit_web "github.com/prometheus/exporter-toolkit/web" - toolkit_webflag "github.com/prometheus/exporter-toolkit/web/kingpinflag" "go.uber.org/atomic" "go.uber.org/automaxprocs/maxprocs" - "gopkg.in/alecthomas/kingpin.v2" "k8s.io/klog" klogv2 "k8s.io/klog/v2" @@ -57,6 +57,7 @@ import ( "github.com/prometheus/prometheus/discovery/legacymanager" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" "github.com/prometheus/prometheus/model/relabel" @@ -70,9 +71,10 @@ import ( "github.com/prometheus/prometheus/tracing" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/agent" + "github.com/prometheus/prometheus/tsdb/wlog" + "github.com/prometheus/prometheus/util/documentcli" "github.com/prometheus/prometheus/util/logging" prom_runtime "github.com/prometheus/prometheus/util/runtime" - "github.com/prometheus/prometheus/util/strutil" "github.com/prometheus/prometheus/web" ) @@ -195,6 +197,10 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { case "no-default-scrape-port": c.scrape.NoDefaultPort = true level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.") + case "native-histograms": + c.tsdb.EnableNativeHistograms = true + c.scrape.EnableProtobufNegotiation = true + level.Info(logger).Log("msg", "Experimental native histogram support enabled.") case "": continue case "promql-at-modifier", "promql-negative-offset": @@ -204,6 +210,12 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { } } } + + if c.tsdb.EnableNativeHistograms && c.tsdb.EnableMemorySnapshotOnShutdown { + c.tsdb.EnableMemorySnapshotOnShutdown = false + level.Warn(logger).Log("msg", "memory-snapshot-on-shutdown has been disabled automatically because memory-snapshot-on-shutdown and native-histograms cannot be enabled at the same time.") + } + return nil } @@ -241,7 +253,10 @@ func main() { a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry."). Default("0.0.0.0:9090").StringVar(&cfg.web.ListenAddress) - webConfig := toolkit_webflag.AddFlags(a) + webConfig := a.Flag( + "web.config.file", + "[EXPERIMENTAL] Path to configuration file that can enable TLS or authentication.", + ).Default("").String() a.Flag("web.read-timeout", "Maximum duration before timing out read of the request, and closing idle connections."). @@ -320,9 +335,15 @@ func main() { serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL."). Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression) + serverOnlyFlag(a, "storage.tsdb.wal-compression-type", "Compression algorithm for the tsdb WAL."). + Hidden().Default(string(wlog.CompressionSnappy)).EnumVar(&cfg.tsdb.WALCompressionType, string(wlog.CompressionSnappy), string(wlog.CompressionZstd)) + serverOnlyFlag(a, "storage.tsdb.head-chunks-write-queue-size", "Size of the queue through which head chunks are written to the disk to be m-mapped, 0 disables the queue completely. Experimental."). 
Default("0").IntVar(&cfg.tsdb.HeadChunksWriteQueueSize) + serverOnlyFlag(a, "storage.tsdb.samples-per-chunk", "Target number of samples per chunk."). + Default("120").Hidden().IntVar(&cfg.tsdb.SamplesPerChunk) + agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage."). Default("data-agent/").StringVar(&cfg.agentStoragePath) @@ -333,6 +354,9 @@ func main() { agentOnlyFlag(a, "storage.agent.wal-compression", "Compress the agent WAL."). Default("true").BoolVar(&cfg.agent.WALCompression) + agentOnlyFlag(a, "storage.agent.wal-compression-type", "Compression algorithm for the agent WAL."). + Hidden().Default(string(wlog.CompressionSnappy)).EnumVar(&cfg.agent.WALCompressionType, string(wlog.CompressionSnappy), string(wlog.CompressionZstd)) + agentOnlyFlag(a, "storage.agent.wal-truncate-frequency", "The frequency at which to truncate the WAL and remove old data."). Hidden().PlaceHolder("").SetValue(&cfg.agent.TruncateFrequency) @@ -396,14 +420,23 @@ func main() { a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates."). Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval) - a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). + a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). Default("").StringsVar(&cfg.featureList) promlogflag.AddFlags(a, &cfg.promlogConfig) + a.Flag("write-documentation", "Generate command line documentation. 
Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error { + if err := documentcli.GenerateMarkdown(a.Model(), os.Stdout); err != nil { + os.Exit(1) + return err + } + os.Exit(0) + return nil + }).Bool() + _, err := a.Parse(os.Args[1:]) if err != nil { - fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing commandline arguments: %w", err)) + fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing command line arguments: %w", err)) a.Usage(os.Args[1:]) os.Exit(2) } @@ -456,11 +489,19 @@ func main() { level.Error(logger).Log("msg", fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "file", absPath, "err", err) os.Exit(2) } + if _, err := cfgFile.GetScrapeConfigs(); err != nil { + absPath, pathErr := filepath.Abs(cfg.configFile) + if pathErr != nil { + absPath = cfg.configFile + } + level.Error(logger).Log("msg", fmt.Sprintf("Error loading scrape config files from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err) + os.Exit(2) + } if cfg.tsdb.EnableExemplarStorage { if cfgFile.StorageConfig.ExemplarsConfig == nil { cfgFile.StorageConfig.ExemplarsConfig = &config.DefaultExemplarsConfig } - cfg.tsdb.MaxExemplars = int64(cfgFile.StorageConfig.ExemplarsConfig.MaxExemplars) + cfg.tsdb.MaxExemplars = cfgFile.StorageConfig.ExemplarsConfig.MaxExemplars } if cfgFile.StorageConfig.TSDBConfig != nil { cfg.tsdb.OutOfOrderTimeWindow = cfgFile.StorageConfig.TSDBConfig.OutOfOrderTimeWindow @@ -619,7 +660,7 @@ func main() { Appendable: fanoutStorage, Queryable: localStorage, QueryFunc: rules.EngineQueryFunc(queryEngine, fanoutStorage), - NotifyFunc: sendAlerts(notifierManager, cfg.web.ExternalURL.String()), + NotifyFunc: rules.SendAlerts(notifierManager, cfg.web.ExternalURL.String()), Context: ctxRule, ExternalURL: cfg.web.ExternalURL, Registerer: prometheus.DefaultRegisterer, @@ -718,7 +759,11 @@ func main() { name: "scrape_sd", reloader: func(cfg *config.Config) error { c := make(map[string]discovery.Configs) - for _, v := range cfg.ScrapeConfigs { + scfgs, err := cfg.GetScrapeConfigs() + if err != nil { + return err + } + for _, v := range scfgs { c[v.JobName] = v.ServiceDiscoveryConfigs } return discoveryManagerScrape.ApplyConfig(c) @@ -1015,6 +1060,7 @@ func main() { startTimeMargin := int64(2 * time.Duration(cfg.tsdb.MinBlockDuration).Seconds() * 1000) localStorage.Set(db, startTimeMargin) + db.SetWriteNotified(remoteStorage) close(dbOpen) <-cancel return nil @@ -1270,36 +1316,6 @@ func computeExternalURL(u, listenAddr string) (*url.URL, error) { return eu, nil } -type sender interface { - Send(alerts ...*notifier.Alert) -} - -// sendAlerts implements the rules.NotifyFunc for a Notifier. -func sendAlerts(s sender, externalURL string) rules.NotifyFunc { - return func(ctx context.Context, expr string, alerts ...*rules.Alert) { - var res []*notifier.Alert - - for _, alert := range alerts { - a := ¬ifier.Alert{ - StartsAt: alert.FiredAt, - Labels: alert.Labels, - Annotations: alert.Annotations, - GeneratorURL: externalURL + strutil.TableLinkForExpression(expr), - } - if !alert.ResolvedAt.IsZero() { - a.EndsAt = alert.ResolvedAt - } else { - a.EndsAt = alert.ValidUntil - } - res = append(res, a) - } - - if len(alerts) > 0 { - s.Send(res...) - } - } -} - // readyStorage implements the Storage interface while allowing to set the actual // storage at a later point in time. 
type readyStorage struct { @@ -1411,6 +1427,10 @@ func (n notReadyAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, return 0, tsdb.ErrNotReady } +func (n notReadyAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + return 0, tsdb.ErrNotReady +} + func (n notReadyAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { return 0, tsdb.ErrNotReady } @@ -1473,11 +1493,11 @@ func (s *readyStorage) Snapshot(dir string, withHead bool) error { } // Stats implements the api_v1.TSDBAdminStats interface. -func (s *readyStorage) Stats(statsByLabelName string) (*tsdb.Stats, error) { +func (s *readyStorage) Stats(statsByLabelName string, limit int) (*tsdb.Stats, error) { if x := s.get(); x != nil { switch db := x.(type) { case *tsdb.DB: - return db.Head().Stats(statsByLabelName), nil + return db.Head().Stats(statsByLabelName, limit), nil case *agent.DB: return nil, agent.ErrUnsupported default: @@ -1533,7 +1553,9 @@ type tsdbOptions struct { MaxBytes units.Base2Bytes NoLockfile bool WALCompression bool + WALCompressionType string HeadChunksWriteQueueSize int + SamplesPerChunk int StripeSize int MinBlockDuration model.Duration MaxBlockDuration model.Duration @@ -1541,6 +1563,7 @@ type tsdbOptions struct { EnableExemplarStorage bool MaxExemplars int64 EnableMemorySnapshotOnShutdown bool + EnableNativeHistograms bool } func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { @@ -1551,14 +1574,16 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { MaxBytes: int64(opts.MaxBytes), NoLockfile: opts.NoLockfile, AllowOverlappingCompaction: true, - WALCompression: opts.WALCompression, + WALCompression: wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType), HeadChunksWriteQueueSize: opts.HeadChunksWriteQueueSize, + SamplesPerChunk: opts.SamplesPerChunk, StripeSize: opts.StripeSize, MinBlockDuration: int64(time.Duration(opts.MinBlockDuration) / time.Millisecond), MaxBlockDuration: int64(time.Duration(opts.MaxBlockDuration) / time.Millisecond), EnableExemplarStorage: opts.EnableExemplarStorage, MaxExemplars: opts.MaxExemplars, EnableMemorySnapshotOnShutdown: opts.EnableMemorySnapshotOnShutdown, + EnableNativeHistograms: opts.EnableNativeHistograms, OutOfOrderTimeWindow: opts.OutOfOrderTimeWindow, } } @@ -1568,6 +1593,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { type agentOptions struct { WALSegmentSize units.Base2Bytes WALCompression bool + WALCompressionType string StripeSize int TruncateFrequency model.Duration MinWALTime, MaxWALTime model.Duration @@ -1577,7 +1603,7 @@ type agentOptions struct { func (opts agentOptions) ToAgentOptions() agent.Options { return agent.Options{ WALSegmentSize: int(opts.WALSegmentSize), - WALCompression: opts.WALCompression, + WALCompression: wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType), StripeSize: opts.StripeSize, TruncateFrequency: time.Duration(opts.TruncateFrequency), MinWALTime: durationToInt64Millis(time.Duration(opts.MinWALTime)), diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index 7dec5b9a59..21447d0369 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -23,6 +23,7 @@ import ( "os" "os/exec" "path/filepath" + "runtime" "strings" "syscall" "testing" @@ -120,7 +121,7 @@ func TestFailedStartupExitCode(t *testing.T) { fakeInputFile := "fake-input-file" expectedExitStatus := 2 - prom := 
exec.Command(promPath, "-test.main", "--config.file="+fakeInputFile) + prom := exec.Command(promPath, "-test.main", "--web.listen-address=0.0.0.0:0", "--config.file="+fakeInputFile) err := prom.Run() require.Error(t, err) @@ -198,7 +199,7 @@ func TestSendAlerts(t *testing.T) { } require.Equal(t, tc.exp, alerts) }) - sendAlerts(senderFunc, "http://localhost:9090")(context.TODO(), "up", tc.in...) + rules.SendAlerts(senderFunc, "http://localhost:9090")(context.TODO(), "up", tc.in...) }) } } @@ -357,7 +358,7 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames } func TestAgentSuccessfulStartup(t *testing.T) { - prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+agentConfig) + prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig) require.NoError(t, prom.Start()) actualExitStatus := 0 @@ -375,7 +376,7 @@ func TestAgentSuccessfulStartup(t *testing.T) { } func TestAgentFailedStartupWithServerFlag(t *testing.T) { - prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--config.file="+promConfig) + prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) output := bytes.Buffer{} prom.Stderr = &output @@ -402,7 +403,7 @@ func TestAgentFailedStartupWithServerFlag(t *testing.T) { } func TestAgentFailedStartupWithInvalidConfig(t *testing.T) { - prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+promConfig) + prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig) require.NoError(t, prom.Start()) actualExitStatus := 0 @@ -437,7 +438,7 @@ func TestModeSpecificFlags(t *testing.T) { for _, tc := range testcases { t.Run(fmt.Sprintf("%s mode with option %s", tc.mode, tc.arg), func(t *testing.T) { - args := []string{"-test.main", tc.arg, t.TempDir()} + args := []string{"-test.main", tc.arg, t.TempDir(), "--web.listen-address=0.0.0.0:0"} if tc.mode == "agent" { args = append(args, "--enable-feature=agent", "--config.file="+agentConfig) @@ -483,3 +484,31 @@ func TestModeSpecificFlags(t *testing.T) { }) } } + +func TestDocumentation(t *testing.T) { + if runtime.GOOS == "windows" { + t.SkipNow() + } + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + cmd := exec.CommandContext(ctx, promPath, "-test.main", "--write-documentation") + + var stdout bytes.Buffer + cmd.Stdout = &stdout + + if err := cmd.Run(); err != nil { + if exitError, ok := err.(*exec.ExitError); ok { + if exitError.ExitCode() != 0 { + fmt.Println("Command failed with non-zero exit code") + } + } + } + + generatedContent := strings.ReplaceAll(stdout.String(), filepath.Base(promPath), strings.TrimSuffix(filepath.Base(promPath), ".test")) + + expectedContent, err := os.ReadFile(filepath.Join("..", "..", "docs", "command-line", "prometheus.md")) + require.NoError(t, err) + + require.Equal(t, string(expectedContent), generatedContent, "Generated content does not match documentation. 
Hint: run `make cli-documentation`.") +} diff --git a/cmd/prometheus/main_unix_test.go b/cmd/prometheus/main_unix_test.go index b49110ea91..7224e25d70 100644 --- a/cmd/prometheus/main_unix_test.go +++ b/cmd/prometheus/main_unix_test.go @@ -72,9 +72,11 @@ Loop: if !startedOk { t.Fatal("prometheus didn't start in the specified timeout") } - if err := prom.Process.Kill(); err == nil { + switch err := prom.Process.Kill(); { + case err == nil: t.Errorf("prometheus didn't shutdown gracefully after sending the Interrupt signal") - } else if stoppedErr != nil && stoppedErr.Error() != "signal: interrupt" { // TODO - find a better way to detect when the process didn't exit as expected! + case stoppedErr != nil && stoppedErr.Error() != "signal: interrupt": + // TODO: find a better way to detect when the process didn't exit as expected! t.Errorf("prometheus exited with an unexpected error: %v", stoppedErr) } } diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go index d5dfbea509..f20f2a22c0 100644 --- a/cmd/prometheus/query_log_test.go +++ b/cmd/prometheus/query_log_test.go @@ -193,7 +193,7 @@ func (p *queryLogTest) String() string { } name = name + ", " + p.host + ":" + strconv.Itoa(p.port) if p.enabledAtStart { - name = name + ", enabled at start" + name += ", enabled at start" } if p.prefix != "" { name = name + ", with prefix " + p.prefix diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go index 3c23d2c037..39410881b2 100644 --- a/cmd/promtool/backfill.go +++ b/cmd/promtool/backfill.go @@ -101,7 +101,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn nextSampleTs int64 = math.MaxInt64 ) - for t := mint; t <= maxt; t = t + blockDuration { + for t := mint; t <= maxt; t += blockDuration { tsUpper := t + blockDuration if nextSampleTs != math.MaxInt64 && nextSampleTs >= tsUpper { // The next sample is not in this timerange, we can avoid parsing diff --git a/cmd/promtool/backfill_test.go b/cmd/promtool/backfill_test.go index c4e6c6b7c0..e6f7cad31b 100644 --- a/cmd/promtool/backfill_test.go +++ b/cmd/promtool/backfill_test.go @@ -25,6 +25,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/chunkenc" ) type backfillSample struct { @@ -43,14 +44,14 @@ func sortSamples(samples []backfillSample) { }) } -func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample { +func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample { // nolint:revive ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*")) samples := []backfillSample{} for ss.Next() { series := ss.At() - it := series.Iterator() + it := series.Iterator(nil) require.NoError(t, it.Err()) - for it.Next() { + for it.Next() == chunkenc.ValFloat { ts, v := it.At() samples = append(samples, backfillSample{Timestamp: ts, Value: v, Labels: series.Labels()}) } diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index a2f710d147..f94be8b27f 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -31,6 +31,7 @@ import ( "text/tabwriter" "time" + "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/google/pprof/profile" "github.com/prometheus/client_golang/api" @@ -41,10 +42,10 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/common/version" 
"github.com/prometheus/exporter-toolkit/web" - "gopkg.in/alecthomas/kingpin.v2" "gopkg.in/yaml.v2" dto "github.com/prometheus/client_model/go" + promconfig "github.com/prometheus/common/config" "github.com/prometheus/common/expfmt" "github.com/prometheus/prometheus/config" @@ -58,6 +59,7 @@ import ( _ "github.com/prometheus/prometheus/plugins" // Register plugins. "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/scrape" + "github.com/prometheus/prometheus/util/documentcli" ) const ( @@ -69,11 +71,20 @@ const ( lintOptionAll = "all" lintOptionDuplicateRules = "duplicate-rules" lintOptionNone = "none" + checkHealth = "/-/healthy" + checkReadiness = "/-/ready" ) var lintOptions = []string{lintOptionAll, lintOptionDuplicateRules, lintOptionNone} func main() { + var ( + httpRoundTripper = api.DefaultRoundTripper + serverURL *url.URL + remoteWriteURL *url.URL + httpConfigFilePath string + ) + app := kingpin.New(filepath.Base(os.Args[0]), "Tooling for the Prometheus monitoring system.").UsageWriter(os.Stdout) app.Version(version.Print("promtool")) app.HelpFlag.Short('h') @@ -105,11 +116,19 @@ func main() { "The config files to check.", ).Required().ExistingFiles() + checkServerHealthCmd := checkCmd.Command("healthy", "Check if the Prometheus server is healthy.") + checkServerHealthCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath) + checkServerHealthCmd.Flag("url", "The URL for the Prometheus server.").Default("http://localhost:9090").URLVar(&serverURL) + + checkServerReadyCmd := checkCmd.Command("ready", "Check if the Prometheus server is ready.") + checkServerReadyCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath) + checkServerReadyCmd.Flag("url", "The URL for the Prometheus server.").Default("http://localhost:9090").URLVar(&serverURL) + checkRulesCmd := checkCmd.Command("rules", "Check if the rule files are valid or not.") ruleFiles := checkRulesCmd.Arg( "rule-files", - "The rule files to check.", - ).Required().ExistingFiles() + "The rule files to check, default is read from standard input.", + ).ExistingFiles() checkRulesLint := checkRulesCmd.Flag( "lint", "Linting checks to apply. Available options are: "+strings.Join(lintOptions, ", ")+". 
Use --lint=none to disable linting", @@ -124,14 +143,15 @@ func main() { queryCmd := app.Command("query", "Run query against a Prometheus server.") queryCmdFmt := queryCmd.Flag("format", "Output format of the query.").Short('o').Default("promql").Enum("promql", "json") + queryCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath) queryInstantCmd := queryCmd.Command("instant", "Run instant query.") - queryInstantServer := queryInstantCmd.Arg("server", "Prometheus server to query.").Required().URL() + queryInstantCmd.Arg("server", "Prometheus server to query.").Required().URLVar(&serverURL) queryInstantExpr := queryInstantCmd.Arg("expr", "PromQL query expression.").Required().String() queryInstantTime := queryInstantCmd.Flag("time", "Query evaluation time (RFC3339 or Unix timestamp).").String() queryRangeCmd := queryCmd.Command("range", "Run range query.") - queryRangeServer := queryRangeCmd.Arg("server", "Prometheus server to query.").Required().URL() + queryRangeCmd.Arg("server", "Prometheus server to query.").Required().URLVar(&serverURL) queryRangeExpr := queryRangeCmd.Arg("expr", "PromQL query expression.").Required().String() queryRangeHeaders := queryRangeCmd.Flag("header", "Extra headers to send to server.").StringMap() queryRangeBegin := queryRangeCmd.Flag("start", "Query range start time (RFC3339 or Unix timestamp).").String() @@ -139,7 +159,7 @@ func main() { queryRangeStep := queryRangeCmd.Flag("step", "Query step size (duration).").Duration() querySeriesCmd := queryCmd.Command("series", "Run series query.") - querySeriesServer := querySeriesCmd.Arg("server", "Prometheus server to query.").Required().URL() + querySeriesCmd.Arg("server", "Prometheus server to query.").Required().URLVar(&serverURL) querySeriesMatch := querySeriesCmd.Flag("match", "Series selector. Can be specified multiple times.").Required().Strings() querySeriesBegin := querySeriesCmd.Flag("start", "Start time (RFC3339 or Unix timestamp).").String() querySeriesEnd := querySeriesCmd.Flag("end", "End time (RFC3339 or Unix timestamp).").String() @@ -153,12 +173,24 @@ func main() { debugAllServer := debugAllCmd.Arg("server", "Prometheus server to get all debug information from.").Required().String() queryLabelsCmd := queryCmd.Command("labels", "Run labels query.") - queryLabelsServer := queryLabelsCmd.Arg("server", "Prometheus server to query.").Required().URL() + queryLabelsCmd.Arg("server", "Prometheus server to query.").Required().URLVar(&serverURL) queryLabelsName := queryLabelsCmd.Arg("name", "Label name to provide label values for.").Required().String() queryLabelsBegin := queryLabelsCmd.Flag("start", "Start time (RFC3339 or Unix timestamp).").String() queryLabelsEnd := queryLabelsCmd.Flag("end", "End time (RFC3339 or Unix timestamp).").String() queryLabelsMatch := queryLabelsCmd.Flag("match", "Series selector. 
Can be specified multiple times.").Strings() + pushCmd := app.Command("push", "Push to a Prometheus server.") + pushCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath) + pushMetricsCmd := pushCmd.Command("metrics", "Push metrics to a Prometheus remote write endpoint (for testing purposes only).") + pushMetricsCmd.Arg("remote-write-url", "Prometheus remote write URL to push metrics to.").Required().URLVar(&remoteWriteURL) + metricFiles := pushMetricsCmd.Arg( + "metric-files", + "The metric files to push; if none are provided, metrics are read from standard input.", + ).ExistingFiles() + pushMetricsLabels := pushMetricsCmd.Flag("label", "Label to attach to metrics. Can be specified multiple times.").Default("job=promtool").StringMap() + pushMetricsTimeout := pushMetricsCmd.Flag("timeout", "The time to wait for pushing metrics.").Default("30s").Duration() + pushMetricsHeaders := pushMetricsCmd.Flag("header", "Prometheus remote write header.").StringMap() + testCmd := app.Command("test", "Unit testing.") testRulesCmd := testCmd.Command("rules", "Unit tests for rules.") testRulesFiles := testRulesCmd.Arg( @@ -190,6 +222,7 @@ func main() { dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String() dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64() dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64() + dumpMatch := tsdbDumpCmd.Flag("match", "Series selector.").Default("{__name__=~'(?s:.*)'}").String() importCmd := tsdbCmd.Command("create-blocks-from", "[Experimental] Import samples from input and produce TSDB blocks. Please refer to the storage docs for more details.") importHumanReadable := importCmd.Flag("human-readable", "Print human readable values.").Short('r').Bool() @@ -199,7 +232,8 @@ func main() { importFilePath := openMetricsImportCmd.Arg("input file", "OpenMetrics file to read samples from.").Required().String() importDBPath := openMetricsImportCmd.Arg("output directory", "Output directory for generated blocks.").Default(defaultDBPath).String() importRulesCmd := importCmd.Command("rules", "Create blocks of data for new recording rules.") - importRulesURL := importRulesCmd.Flag("url", "The URL for the Prometheus API with the data where the rule will be backfilled from.").Default("http://localhost:9090").URL() + importRulesCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath) + importRulesCmd.Flag("url", "The URL for the Prometheus API with the data where the rule will be backfilled from.").Default("http://localhost:9090").URLVar(&serverURL) importRulesStart := importRulesCmd.Flag("start", "The time to start backfilling the new rule from. Must be a RFC3339 formatted date or Unix timestamp. Required."). Required().String() importRulesEnd := importRulesCmd.Flag("end", "If an end time is provided, all recording rules in the rule files provided will be backfilled to the end time. Default will backfill up to 3 hours ago. Must be a RFC3339 formatted date or Unix timestamp.").String() @@ -213,6 +247,8 @@ func main() { featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related and no-default-scrape-port). 
See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings() + documentationCmd := app.Command("write-documentation", "Generate command line documentation. Internal use.").Hidden() + parsedCmd := kingpin.MustParse(app.Parse(os.Args[1:])) var p printer @@ -223,6 +259,22 @@ func main() { p = &promqlPrinter{} } + if httpConfigFilePath != "" { + if serverURL != nil && serverURL.User.Username() != "" { + kingpin.Fatalf("Cannot set basic auth in the server URL and use an http.config.file at the same time") + } + var err error + httpConfig, _, err := config_util.LoadHTTPConfigFile(httpConfigFilePath) + if err != nil { + kingpin.Fatalf("Failed to load HTTP config file: %v", err) + } + + httpRoundTripper, err = promconfig.NewRoundTripperFromConfig(*httpConfig, "promtool", config_util.WithUserAgent("promtool/"+version.Version)) + if err != nil { + kingpin.Fatalf("Failed to create a new HTTP round tripper: %v", err) + } + } + var noDefaultScrapePort bool for _, f := range *featureList { opts := strings.Split(f, ",") @@ -247,6 +299,12 @@ func main() { case checkConfigCmd.FullCommand(): os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...)) + case checkServerHealthCmd.FullCommand(): + os.Exit(checkErr(CheckServerStatus(serverURL, checkHealth, httpRoundTripper))) + + case checkServerReadyCmd.FullCommand(): + os.Exit(checkErr(CheckServerStatus(serverURL, checkReadiness, httpRoundTripper))) + case checkWebConfigCmd.FullCommand(): os.Exit(CheckWebConfig(*webConfigFiles...)) @@ -256,14 +314,17 @@ func main() { case checkMetricsCmd.FullCommand(): os.Exit(CheckMetrics(*checkMetricsExtended)) + case pushMetricsCmd.FullCommand(): + os.Exit(PushMetrics(remoteWriteURL, httpRoundTripper, *pushMetricsHeaders, *pushMetricsTimeout, *pushMetricsLabels, *metricFiles...)) + case queryInstantCmd.FullCommand(): - os.Exit(QueryInstant(*queryInstantServer, *queryInstantExpr, *queryInstantTime, p)) + os.Exit(QueryInstant(serverURL, httpRoundTripper, *queryInstantExpr, *queryInstantTime, p)) case queryRangeCmd.FullCommand(): - os.Exit(QueryRange(*queryRangeServer, *queryRangeHeaders, *queryRangeExpr, *queryRangeBegin, *queryRangeEnd, *queryRangeStep, p)) + os.Exit(QueryRange(serverURL, httpRoundTripper, *queryRangeHeaders, *queryRangeExpr, *queryRangeBegin, *queryRangeEnd, *queryRangeStep, p)) case querySeriesCmd.FullCommand(): - os.Exit(QuerySeries(*querySeriesServer, *querySeriesMatch, *querySeriesBegin, *querySeriesEnd, p)) + os.Exit(QuerySeries(serverURL, httpRoundTripper, *querySeriesMatch, *querySeriesBegin, *querySeriesEnd, p)) case debugPprofCmd.FullCommand(): os.Exit(debugPprof(*debugPprofServer)) @@ -275,7 +336,7 @@ func main() { os.Exit(debugAll(*debugAllServer)) case queryLabelsCmd.FullCommand(): - os.Exit(QueryLabels(*queryLabelsServer, *queryLabelsMatch, *queryLabelsName, *queryLabelsBegin, *queryLabelsEnd, p)) + os.Exit(QueryLabels(serverURL, httpRoundTripper, *queryLabelsMatch, *queryLabelsName, *queryLabelsBegin, *queryLabelsEnd, p)) case testRulesCmd.FullCommand(): os.Exit(RulesUnitTest( @@ -296,13 +357,15 @@ func main() { os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable))) case tsdbDumpCmd.FullCommand(): - os.Exit(checkErr(dumpSamples(*dumpPath, *dumpMinTime, *dumpMaxTime))) + os.Exit(checkErr(dumpSamples(*dumpPath, *dumpMinTime, *dumpMaxTime, *dumpMatch))) // TODO(aSquare14): Work on adding support for custom block size. 
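As an aside before the remaining dispatch cases: the --http.config.file handling above boils down to building a single http.RoundTripper up front and handing it to every subcommand. A minimal standalone sketch, not the verbatim promtool code; the helper name roundTripperFromFile is hypothetical:

package main

import (
	"fmt"
	"net/http"

	promconfig "github.com/prometheus/common/config"
)

// roundTripperFromFile builds one RoundTripper from an HTTP client config
// file so every API call can reuse it, mirroring the wiring above.
func roundTripperFromFile(path string) (http.RoundTripper, error) {
	if path == "" {
		// No config file given: fall back to the default transport.
		return http.DefaultTransport, nil
	}
	httpConfig, _, err := promconfig.LoadHTTPConfigFile(path)
	if err != nil {
		return nil, fmt.Errorf("load HTTP config file: %w", err)
	}
	// "promtool" names the client; the code above additionally sets a
	// User-Agent via WithUserAgent.
	return promconfig.NewRoundTripperFromConfig(*httpConfig, "promtool")
}

func main() {
	rt, err := roundTripperFromFile("") // empty path: default transport
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", rt)
}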
case openMetricsImportCmd.FullCommand(): os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration)) case importRulesCmd.FullCommand(): - os.Exit(checkErr(importRules(*importRulesURL, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...))) + os.Exit(checkErr(importRules(serverURL, httpRoundTripper, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...))) + case documentationCmd.FullCommand(): + os.Exit(checkErr(documentcli.GenerateMarkdown(app.Model(), os.Stdout))) } } @@ -338,6 +401,43 @@ func (ls lintConfig) lintDuplicateRules() bool { return ls.all || ls.duplicateRules } +// CheckServerStatus checks the Prometheus server status (healthy or ready) at the given endpoint. +func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper http.RoundTripper) error { + if serverURL.Scheme == "" { + serverURL.Scheme = "http" + } + + config := api.Config{ + Address: serverURL.String() + checkEndpoint, + RoundTripper: roundTripper, + } + + // Create new client. + c, err := api.NewClient(config) + if err != nil { + fmt.Fprintln(os.Stderr, "error creating API client:", err) + return err + } + + request, err := http.NewRequest("GET", config.Address, nil) + if err != nil { + return err + } + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + response, dataBytes, err := c.Do(ctx, request) + if err != nil { + return err + } + + if response.StatusCode != http.StatusOK { + return fmt.Errorf("check failed: URL=%s, status=%d", serverURL, response.StatusCode) + } + + fmt.Fprintln(os.Stderr, " SUCCESS: ", string(dataBytes)) + return nil +} + // CheckConfig validates configuration files. 
func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files ...string) int { failed := false @@ -357,20 +457,12 @@ func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files } fmt.Println() - for _, rf := range ruleFiles { - if n, errs := checkRules(rf, lintSettings); len(errs) > 0 { - fmt.Fprintln(os.Stderr, " FAILED:") - for _, err := range errs { - fmt.Fprintln(os.Stderr, " ", err) - } - failed = true - for _, err := range errs { - hasErrors = hasErrors || !errors.Is(err, lintError) - } - } else { - fmt.Printf(" SUCCESS: %d rules found\n", n) - } - fmt.Println() + rulesFailed, rulesHasErrors := checkRules(ruleFiles, lintSettings) + if rulesFailed { + failed = rulesFailed + } + if rulesHasErrors { + hasErrors = rulesHasErrors + } } if failed && hasErrors { @@ -437,7 +529,18 @@ } } - for _, scfg := range cfg.ScrapeConfigs { + var scfgs []*config.ScrapeConfig + if checkSyntaxOnly { + scfgs = cfg.ScrapeConfigs + } else { + var err error + scfgs, err = cfg.GetScrapeConfigs() + if err != nil { + return nil, fmt.Errorf("error loading scrape configs: %w", err) + } + } + + for _, scfg := range scfgs { if !checkSyntaxOnly && scfg.HTTPClientConfig.Authorization != nil { if err := checkFileExists(scfg.HTTPClientConfig.Authorization.CredentialsFile); err != nil { return nil, fmt.Errorf("error checking authorization credentials or bearer token file %q: %w", scfg.HTTPClientConfig.Authorization.CredentialsFile, err) @@ -587,9 +690,57 @@ func checkSDFile(filename string) ([]*targetgroup.Group, error) { func CheckRules(ls lintConfig, files ...string) int { failed := false hasErrors := false + if len(files) == 0 { + fmt.Println("Checking standard input") + data, err := io.ReadAll(os.Stdin) + if err != nil { + fmt.Fprintln(os.Stderr, " FAILED:", err) + return failureExitCode + } + rgs, errs := rulefmt.Parse(data) + if len(errs) > 0 { + for _, e := range errs { + fmt.Fprintln(os.Stderr, e.Error()) + } + return failureExitCode + } + if n, errs := checkRuleGroups(rgs, ls); errs != nil { + fmt.Fprintln(os.Stderr, " FAILED:") + for _, e := range errs { + fmt.Fprintln(os.Stderr, e.Error()) + } + failed = true + for _, err := range errs { + hasErrors = hasErrors || !errors.Is(err, lintError) + } + } else { + fmt.Printf(" SUCCESS: %d rules found\n", n) + } + fmt.Println() + } else { + failed, hasErrors = checkRules(files, ls) + } + if failed && hasErrors { + return failureExitCode + } + if failed && ls.fatal { + return lintErrExitCode + } + + return successExitCode +} + +// checkRules validates rule files. 
+func checkRules(files []string, ls lintConfig) (bool, bool) { + failed := false + hasErrors := false for _, f := range files { - if n, errs := checkRules(f, ls); errs != nil { + fmt.Println("Checking", f) + rgs, errs := rulefmt.ParseFile(f) + if errs != nil { + failed = true + continue + } + if n, errs := checkRuleGroups(rgs, ls); errs != nil { fmt.Fprintln(os.Stderr, " FAILED:") for _, e := range errs { fmt.Fprintln(os.Stderr, e.Error()) @@ -603,23 +754,10 @@ func CheckRules(ls lintConfig, files ...string) int { } fmt.Println() } - if failed && hasErrors { - return failureExitCode - } - if failed && ls.fatal { - return lintErrExitCode - } - return successExitCode + return failed, hasErrors } -func checkRules(filename string, lintSettings lintConfig) (int, []error) { - fmt.Println("Checking", filename) - - rgs, errs := rulefmt.ParseFile(filename) - if errs != nil { - return successExitCode, errs - } - +func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings lintConfig) (int, []error) { numRules := 0 for _, rg := range rgs.Groups { numRules += len(rg.Rules) @@ -631,9 +769,9 @@ func checkRules(filename string, lintSettings lintConfig) (int, []error) { errMessage := fmt.Sprintf("%d duplicate rule(s) found.\n", len(dRules)) for _, n := range dRules { errMessage += fmt.Sprintf("Metric: %s\nLabel(s):\n", n.metric) - for _, l := range n.label { + n.label.Range(func(l labels.Label) { errMessage += fmt.Sprintf("\t%s: %s\n", l.Name, l.Value) - } + }) } errMessage += "Might cause inconsistency while recording expressions" return 0, []error{fmt.Errorf("%w %s", lintError, errMessage)} @@ -794,12 +932,13 @@ func checkMetricsExtended(r io.Reader) ([]metricStat, int, error) { } // QueryInstant performs an instant query against a Prometheus server. -func QueryInstant(url *url.URL, query, evalTime string, p printer) int { +func QueryInstant(url *url.URL, roundTripper http.RoundTripper, query, evalTime string, p printer) int { if url.Scheme == "" { url.Scheme = "http" } config := api.Config{ - Address: url.String(), + Address: url.String(), + RoundTripper: roundTripper, } // Create new client. @@ -834,12 +973,13 @@ func QueryInstant(url *url.URL, query, evalTime string, p printer) int { } // QueryRange performs a range query against a Prometheus server. -func QueryRange(url *url.URL, headers map[string]string, query, start, end string, step time.Duration, p printer) int { +func QueryRange(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, query, start, end string, step time.Duration, p printer) int { if url.Scheme == "" { url.Scheme = "http" } config := api.Config{ - Address: url.String(), + Address: url.String(), + RoundTripper: roundTripper, } if len(headers) > 0 { @@ -847,7 +987,7 @@ func QueryRange(url *url.URL, headers map[string]string, query, start, end strin for key, value := range headers { req.Header.Add(key, value) } - return http.DefaultTransport.RoundTrip(req) + return roundTripper.RoundTrip(req) }) } @@ -907,12 +1047,13 @@ func QueryRange(url *url.URL, headers map[string]string, query, start, end strin } // QuerySeries queries for a series against a Prometheus server. -func QuerySeries(url *url.URL, matchers []string, start, end string, p printer) int { +func QuerySeries(url *url.URL, roundTripper http.RoundTripper, matchers []string, start, end string, p printer) int { if url.Scheme == "" { url.Scheme = "http" } config := api.Config{ - Address: url.String(), + Address: url.String(), + RoundTripper: roundTripper, } // Create new client. 
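Each of the query helpers in this hunk then follows the same client-golang construction pattern. A hedged standalone sketch of an instant query with a custom round tripper; the function name runInstantQuery and the server address are placeholders (the address matches the documented default):

package main

import (
	"context"
	"fmt"
	"net/http"
	"os"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func runInstantQuery(rt http.RoundTripper) error {
	client, err := api.NewClient(api.Config{
		Address:      "http://localhost:9090", // placeholder server URL
		RoundTripper: rt,                      // e.g. the one built from --http.config.file
	})
	if err != nil {
		return err
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	val, warnings, err := v1.NewAPI(client).Query(ctx, "up", time.Now())
	if err != nil {
		return err
	}
	if len(warnings) > 0 {
		fmt.Println("warnings:", warnings)
	}
	fmt.Println(val)
	return nil
}

func main() {
	if err := runInstantQuery(http.DefaultTransport); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}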
@@ -943,12 +1084,13 @@ func QuerySeries(url *url.URL, matchers []string, start, end string, p printer) } // QueryLabels queries for label values against a Prometheus server. -func QueryLabels(url *url.URL, matchers []string, name, start, end string, p printer) int { +func QueryLabels(url *url.URL, roundTripper http.RoundTripper, matchers []string, name, start, end string, p printer) int { if url.Scheme == "" { url.Scheme = "http" } config := api.Config{ - Address: url.String(), + Address: url.String(), + RoundTripper: roundTripper, } // Create new client. @@ -1153,7 +1295,7 @@ func (j *jsonPrinter) printLabelValues(v model.LabelValues) { // importRules backfills recording rules from the files provided. The output are blocks of data // at the outputDir location. -func importRules(url *url.URL, start, end, outputDir string, evalInterval, maxBlockDuration time.Duration, files ...string) error { +func importRules(url *url.URL, roundTripper http.RoundTripper, start, end, outputDir string, evalInterval, maxBlockDuration time.Duration, files ...string) error { ctx := context.Background() var stime, etime time.Time var err error @@ -1183,7 +1325,8 @@ func importRules(url *url.URL, start, end, outputDir string, evalInterval, maxBl maxBlockDuration: maxBlockDuration, } client, err := api.NewClient(api.Config{ - Address: url.String(), + Address: url.String(), + RoundTripper: roundTripper, }) if err != nil { return fmt.Errorf("new api client error: %w", err) @@ -1219,8 +1362,11 @@ func checkTargetGroupsForAlertmanager(targetGroups []*targetgroup.Group, amcfg * } func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *config.ScrapeConfig) error { + var targets []*scrape.Target + lb := labels.NewBuilder(labels.EmptyLabels()) for _, tg := range targetGroups { - _, failures := scrape.TargetsFromGroup(tg, scfg, false) + var failures []error + targets, failures = scrape.TargetsFromGroup(tg, scfg, false, targets, lb) if len(failures) > 0 { first := failures[0] return first diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index 40df3e5248..6cfa48798e 100644 --- a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -14,6 +14,8 @@ package main import ( + "bytes" + "context" "errors" "fmt" "net/http" @@ -21,6 +23,7 @@ import ( "net/url" "os" "os/exec" + "path/filepath" "runtime" "strings" "syscall" @@ -56,14 +59,14 @@ func TestQueryRange(t *testing.T) { require.Equal(t, nil, err) p := &promqlPrinter{} - exitCode := QueryRange(urlObject, map[string]string{}, "up", "0", "300", 0, p) + exitCode := QueryRange(urlObject, http.DefaultTransport, map[string]string{}, "up", "0", "300", 0, p) require.Equal(t, "/api/v1/query_range", getRequest().URL.Path) form := getRequest().Form require.Equal(t, "up", form.Get("query")) require.Equal(t, "1", form.Get("step")) require.Equal(t, 0, exitCode) - exitCode = QueryRange(urlObject, map[string]string{}, "up", "0", "300", 10*time.Millisecond, p) + exitCode = QueryRange(urlObject, http.DefaultTransport, map[string]string{}, "up", "0", "300", 10*time.Millisecond, p) require.Equal(t, "/api/v1/query_range", getRequest().URL.Path) form = getRequest().Form require.Equal(t, "up", form.Get("query")) @@ -79,7 +82,7 @@ func TestQueryInstant(t *testing.T) { require.Equal(t, nil, err) p := &promqlPrinter{} - exitCode := QueryInstant(urlObject, "up", "300", p) + exitCode := QueryInstant(urlObject, http.DefaultTransport, "up", "300", p) require.Equal(t, "/api/v1/query", getRequest().URL.Path) form := getRequest().Form require.Equal(t, "up", 
form.Get("query")) @@ -433,3 +436,31 @@ func TestExitCodes(t *testing.T) { }) } } + +func TestDocumentation(t *testing.T) { + if runtime.GOOS == "windows" { + t.SkipNow() + } + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + cmd := exec.CommandContext(ctx, promtoolPath, "-test.main", "write-documentation") + + var stdout bytes.Buffer + cmd.Stdout = &stdout + + if err := cmd.Run(); err != nil { + if exitError, ok := err.(*exec.ExitError); ok { + if exitError.ExitCode() != 0 { + fmt.Println("Command failed with non-zero exit code") + } + } + } + + generatedContent := strings.ReplaceAll(stdout.String(), filepath.Base(promtoolPath), strings.TrimSuffix(filepath.Base(promtoolPath), ".test")) + + expectedContent, err := os.ReadFile(filepath.Join("..", "..", "docs", "command-line", "promtool.md")) + require.NoError(t, err) + + require.Equal(t, string(expectedContent), generatedContent, "Generated content does not match documentation. Hint: run `make cli-documentation`.") +} diff --git a/cmd/promtool/metrics.go b/cmd/promtool/metrics.go new file mode 100644 index 0000000000..2bc2237e2f --- /dev/null +++ b/cmd/promtool/metrics.go @@ -0,0 +1,138 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "net/url" + "os" + "time" + + "github.com/golang/snappy" + config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/storage/remote" + "github.com/prometheus/prometheus/util/fmtutil" +) + +// Push metrics to a prometheus remote write (for testing purpose only). 
+func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, labels map[string]string, files ...string) int { + addressURL, err := url.Parse(url.String()) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return failureExitCode + } + + // build remote write client + writeClient, err := remote.NewWriteClient("remote-write", &remote.ClientConfig{ + URL: &config_util.URL{URL: addressURL}, + Timeout: model.Duration(timeout), + }) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return failureExitCode + } + + // set custom tls config from httpConfigFilePath + // set custom headers to every request + client, ok := writeClient.(*remote.Client) + if !ok { + fmt.Fprintln(os.Stderr, fmt.Errorf("unexpected type %T", writeClient)) + return failureExitCode + } + client.Client.Transport = &setHeadersTransport{ + RoundTripper: roundTripper, + headers: headers, + } + + var data []byte + var failed bool + + if len(files) == 0 { + data, err = io.ReadAll(os.Stdin) + if err != nil { + fmt.Fprintln(os.Stderr, " FAILED:", err) + return failureExitCode + } + fmt.Printf("Parsing standard input\n") + if parseAndPushMetrics(client, data, labels) { + fmt.Printf(" SUCCESS: metrics pushed to remote write.\n") + return successExitCode + } + return failureExitCode + } + + for _, file := range files { + data, err = os.ReadFile(file) + if err != nil { + fmt.Fprintln(os.Stderr, " FAILED:", err) + failed = true + continue + } + + fmt.Printf("Parsing metrics file %s\n", file) + if parseAndPushMetrics(client, data, labels) { + fmt.Printf(" SUCCESS: metrics file %s pushed to remote write.\n", file) + continue + } + failed = true + } + + if failed { + return failureExitCode + } + + return successExitCode +} + +func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]string) bool { + metricsData, err := fmtutil.MetricTextToWriteRequest(bytes.NewReader(data), labels) + if err != nil { + fmt.Fprintln(os.Stderr, " FAILED:", err) + return false + } + + raw, err := metricsData.Marshal() + if err != nil { + fmt.Fprintln(os.Stderr, " FAILED:", err) + return false + } + + // Encode the request body into snappy encoding. + compressed := snappy.Encode(nil, raw) + err = client.Store(context.Background(), compressed) + if err != nil { + fmt.Fprintln(os.Stderr, " FAILED:", err) + return false + } + + return true +} + +type setHeadersTransport struct { + http.RoundTripper + headers map[string]string +} + +func (s *setHeadersTransport) RoundTrip(req *http.Request) (*http.Response, error) { + for key, value := range s.headers { + req.Header.Set(key, value) + } + return s.RoundTripper.RoundTrip(req) +} diff --git a/cmd/promtool/rules.go b/cmd/promtool/rules.go index 4dbef34ebb..d8d6bb83e1 100644 --- a/cmd/promtool/rules.go +++ b/cmd/promtool/rules.go @@ -68,7 +68,7 @@ func newRuleImporter(logger log.Logger, config ruleImporterConfig, apiClient que } // loadGroups parses groups from a list of recording rule files. -func (importer *ruleImporter) loadGroups(ctx context.Context, filenames []string) (errs []error) { +func (importer *ruleImporter) loadGroups(_ context.Context, filenames []string) (errs []error) { groups, errs := importer.ruleManager.LoadGroups(importer.config.evalInterval, labels.Labels{}, "", nil, filenames...) 
if errs != nil { return errs @@ -100,7 +100,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName startInMs := start.Unix() * int64(time.Second/time.Millisecond) endInMs := end.Unix() * int64(time.Second/time.Millisecond) - for startOfBlock := blockDuration * (startInMs / blockDuration); startOfBlock <= endInMs; startOfBlock = startOfBlock + blockDuration { + for startOfBlock := blockDuration * (startInMs / blockDuration); startOfBlock <= endInMs; startOfBlock += blockDuration { endOfBlock := startOfBlock + blockDuration - 1 currStart := max(startOfBlock/int64(time.Second/time.Millisecond), start.Unix()) @@ -158,14 +158,15 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName // Setting the rule labels after the output of the query, // so they can override query output. - for _, l := range ruleLabels { + ruleLabels.Range(func(l labels.Label) { lb.Set(l.Name, l.Value) - } + }) lb.Set(labels.MetricName, ruleName) + lbls := lb.Labels() for _, value := range sample.Values { - if err := app.add(ctx, lb.Labels(nil), timestamp.FromTime(value.Timestamp.Time()), float64(value.Value)); err != nil { + if err := app.add(ctx, lbls, timestamp.FromTime(value.Timestamp.Time()), float64(value.Value)); err != nil { return fmt.Errorf("add: %w", err) } } diff --git a/cmd/promtool/rules_test.go b/cmd/promtool/rules_test.go index c7e9933fb4..213b7d2a01 100644 --- a/cmd/promtool/rules_test.go +++ b/cmd/promtool/rules_test.go @@ -28,13 +28,14 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/chunkenc" ) type mockQueryRangeAPI struct { samples model.Matrix } -func (mockAPI mockQueryRangeAPI) QueryRange(ctx context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) { +func (mockAPI mockQueryRangeAPI) QueryRange(_ context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) { // nolint:revive return mockAPI.samples, v1.Warnings{}, nil } @@ -99,7 +100,7 @@ func TestBackfillRuleIntegration(t *testing.T) { require.Equal(t, 1, len(gRules)) require.Equal(t, "rule1", gRules[0].Name()) require.Equal(t, "ruleExpr", gRules[0].Query().String()) - require.Equal(t, 1, len(gRules[0].Labels())) + require.Equal(t, 1, gRules[0].Labels().Len()) group2 := ruleImporter.groups[path2+";group2"] require.NotNil(t, group2) @@ -108,7 +109,7 @@ func TestBackfillRuleIntegration(t *testing.T) { require.Equal(t, 2, len(g2Rules)) require.Equal(t, "grp2_rule1", g2Rules[0].Name()) require.Equal(t, "grp2_rule1_expr", g2Rules[0].Query().String()) - require.Equal(t, 0, len(g2Rules[0].Labels())) + require.Equal(t, 0, g2Rules[0].Labels().Len()) // Backfill all recording rules then check the blocks to confirm the correct data was created. 
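Before the backfill check continues below, an aside on the labels API these hunks migrate to: Labels is now opaque, so code iterates with Range and builds with labels.Builder instead of treating it as a slice. A small standalone sketch of the rule-label override logic from importRule; all label values and the rule name job:up:sum are illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// Labels as returned by the range query (illustrative values).
	queryLabels := labels.FromStrings("__name__", "up", "instance", "localhost:9090")
	// Labels configured on the recording rule.
	ruleLabels := labels.FromStrings("team", "sre")

	lb := labels.NewBuilder(queryLabels)
	// Rule labels are applied after the query output, so they win on conflict.
	ruleLabels.Range(func(l labels.Label) {
		lb.Set(l.Name, l.Value)
	})
	lb.Set(labels.MetricName, "job:up:sum")

	fmt.Println(lb.Labels()) // {__name__="job:up:sum", instance="localhost:9090", team="sre"}
}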
errs = ruleImporter.importAll(ctx) @@ -131,15 +132,15 @@ func TestBackfillRuleIntegration(t *testing.T) { for selectedSeries.Next() { seriesCount++ series := selectedSeries.At() - if len(series.Labels()) != 3 { - require.Equal(t, 2, len(series.Labels())) + if series.Labels().Len() != 3 { + require.Equal(t, 2, series.Labels().Len()) x := labels.FromStrings("__name__", "grp2_rule1", "name1", "val1") require.Equal(t, x, series.Labels()) } else { - require.Equal(t, 3, len(series.Labels())) + require.Equal(t, 3, series.Labels().Len()) } - it := series.Iterator() - for it.Next() { + it := series.Iterator(nil) + for it.Next() == chunkenc.ValFloat { samplesCount++ ts, v := it.At() if v == testValue { @@ -160,7 +161,7 @@ func TestBackfillRuleIntegration(t *testing.T) { } } -func newTestRuleImporter(ctx context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) { +func newTestRuleImporter(_ context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) { logger := log.NewNopLogger() cfg := ruleImporterConfig{ outputDir: tmpDir, diff --git a/cmd/promtool/sd.go b/cmd/promtool/sd.go index 981a67af19..7c5ae70365 100644 --- a/cmd/promtool/sd.go +++ b/cmd/promtool/sd.go @@ -47,9 +47,15 @@ func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefault } var scrapeConfig *config.ScrapeConfig + scfgs, err := cfg.GetScrapeConfigs() + if err != nil { + fmt.Fprintln(os.Stderr, "Cannot load scrape configs", err) + return failureExitCode + } + jobs := []string{} jobMatched := false - for _, v := range cfg.ScrapeConfigs { + for _, v := range scfgs { jobs = append(jobs, v.JobName) if v.JobName == sdJobName { jobMatched = true @@ -109,22 +115,22 @@ outerLoop: func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig, noDefaultScrapePort bool) []sdCheckResult { sdCheckResults := []sdCheckResult{} + lb := labels.NewBuilder(labels.EmptyLabels()) for _, targetGroup := range targetGroups { for _, target := range targetGroup.Targets { - labelSlice := make([]labels.Label, 0, len(target)+len(targetGroup.Labels)) + lb.Reset(labels.EmptyLabels()) for name, value := range target { - labelSlice = append(labelSlice, labels.Label{Name: string(name), Value: string(value)}) + lb.Set(string(name), string(value)) } for name, value := range targetGroup.Labels { if _, ok := target[name]; !ok { - labelSlice = append(labelSlice, labels.Label{Name: string(name), Value: string(value)}) + lb.Set(string(name), string(value)) } } - targetLabels := labels.New(labelSlice...) 
- res, orig, err := scrape.PopulateLabels(targetLabels, scrapeConfig, noDefaultScrapePort) + res, orig, err := scrape.PopulateLabels(lb, scrapeConfig, noDefaultScrapePort) result := sdCheckResult{ DiscoveredLabels: orig, Labels: res, diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index 7c7c8f6ec0..4e27f69c05 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -23,24 +23,25 @@ import ( "path/filepath" "runtime" "runtime/pprof" - "sort" "strconv" "strings" "sync" "text/tabwriter" "time" - "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/tsdb/index" - "github.com/alecthomas/units" "github.com/go-kit/log" + "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" + "github.com/prometheus/prometheus/tsdb/index" ) const timeDelta = 30000 @@ -314,7 +315,7 @@ func readPrometheusLabels(r io.Reader, n int) ([]labels.Labels, error) { i := 0 for scanner.Scan() && i < n { - m := make(labels.Labels, 0, 10) + m := make([]labels.Label, 0, 10) r := strings.NewReplacer("\"", "", "{", "", "}", "") s := r.Replace(scanner.Text()) @@ -324,13 +325,12 @@ func readPrometheusLabels(r io.Reader, n int) ([]labels.Labels, error) { split := strings.Split(labelChunk, ":") m = append(m, labels.Label{Name: split[0], Value: split[1]}) } - // Order of the k/v labels matters, don't assume we'll always receive them already sorted. - sort.Sort(m) - h := m.Hash() + ml := labels.New(m...) // This sorts by name - order of the k/v labels matters, don't assume we'll always receive them already sorted. 
+ h := ml.Hash() if _, ok := hashes[h]; ok { continue } - mets = append(mets, m) + mets = append(mets, ml) hashes[h] = struct{}{} i++ } @@ -397,25 +397,20 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error) if err != nil { return nil, nil, err } - blocks, err := db.Blocks() + + if blockID == "" { + blockID, err = db.LastBlockID() + if err != nil { + return nil, nil, err + } + } + + b, err := db.Block(blockID) if err != nil { return nil, nil, err } - var block tsdb.BlockReader - if blockID != "" { - for _, b := range blocks { - if b.Meta().ULID.String() == blockID { - block = b - break - } - } - } else if len(blocks) > 0 { - block = blocks[len(blocks)-1] - } - if block == nil { - return nil, nil, fmt.Errorf("block %s not found", blockID) - } - return db, block, nil + + return db, b, nil } func analyzeBlock(path, blockID string, limit int, runExtended bool) error { @@ -451,7 +446,7 @@ func analyzeBlock(path, blockID string, limit int, runExtended bool) error { postingInfos := []postingInfo{} printInfo := func(postingInfos []postingInfo) { - sort.Slice(postingInfos, func(i, j int) bool { return postingInfos[i].metric > postingInfos[j].metric }) + slices.SortFunc(postingInfos, func(a, b postingInfo) bool { return a.metric > b.metric }) for i, pc := range postingInfos { if i >= limit { @@ -469,21 +464,21 @@ func analyzeBlock(path, blockID string, limit int, runExtended bool) error { if err != nil { return err } - lbls := labels.Labels{} chks := []chunks.Meta{} + builder := labels.ScratchBuilder{} for p.Next() { - if err = ir.Series(p.At(), &lbls, &chks); err != nil { + if err = ir.Series(p.At(), &builder, &chks); err != nil { return err } // Amount of the block time range not covered by this series. uncovered := uint64(meta.MaxTime-meta.MinTime) - uint64(chks[len(chks)-1].MaxTime-chks[0].MinTime) - for _, lbl := range lbls { + builder.Labels().Range(func(lbl labels.Label) { key := lbl.Name + "=" + lbl.Value labelsUncovered[lbl.Name] += uncovered labelpairsUncovered[key] += uncovered labelpairsCount[key]++ entries++ - } + }) } if p.Err() != nil { return p.Err() @@ -588,10 +583,10 @@ func analyzeCompaction(block tsdb.BlockReader, indexr tsdb.IndexReader) (err err nBuckets := 10 histogram := make([]int, nBuckets) totalChunks := 0 + var builder labels.ScratchBuilder for postingsr.Next() { - lbsl := labels.Labels{} var chks []chunks.Meta - if err := indexr.Series(postingsr.At(), &lbsl, &chks); err != nil { + if err := indexr.Series(postingsr.At(), &builder, &chks); err != nil { return err } @@ -624,7 +619,7 @@ func analyzeCompaction(block tsdb.BlockReader, indexr tsdb.IndexReader) (err err return nil } -func dumpSamples(path string, mint, maxt int64) (err error) { +func dumpSamples(path string, mint, maxt int64, match string) (err error) { db, err := tsdb.OpenDBReadOnly(path, nil) if err != nil { return err @@ -638,13 +633,17 @@ func dumpSamples(path string, mint, maxt int64) (err error) { } defer q.Close() - ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*")) + matchers, err := parser.ParseMetricSelector(match) + if err != nil { + return err + } + ss := q.Select(false, nil, matchers...) 
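An aside before the iteration loop below: the new --match value is turned into matchers by the standard PromQL selector parser before being handed to Select. A standalone sketch using the flag's documented default selector:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// The default value of --match matches every series; the (?s:...) flag
	// lets the regex match values that contain newlines.
	matchers, err := parser.ParseMetricSelector("{__name__=~'(?s:.*)'}")
	if err != nil {
		panic(err)
	}
	for _, m := range matchers {
		fmt.Printf("%s %s %q\n", m.Name, m.Type, m.Value)
	}
}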
for ss.Next() { series := ss.At() lbs := series.Labels() - it := series.Iterator() - for it.Next() { + it := series.Iterator(nil) + for it.Next() == chunkenc.ValFloat { ts, val := it.At() fmt.Printf("%s %g %d\n", lbs, val, ts) } diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index 744235ddae..e934f37c85 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -130,7 +130,7 @@ func resolveAndGlobFilepaths(baseDir string, utf *unitTestFile) error { if err != nil { return err } - if len(m) <= 0 { + if len(m) == 0 { fmt.Fprintln(os.Stderr, " WARNING: no file match pattern", rf) } globbedFiles = append(globbedFiles, m...) @@ -284,8 +284,8 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i for _, a := range ar.ActiveAlerts() { if a.State == rules.StateFiring { alerts = append(alerts, labelAndAnnotation{ - Labels: append(labels.Labels{}, a.Labels...), - Annotations: append(labels.Labels{}, a.Annotations...), + Labels: a.Labels.Copy(), + Annotations: a.Annotations.Copy(), }) } } @@ -347,7 +347,7 @@ Outer: for _, s := range got { gotSamples = append(gotSamples, parsedSample{ Labels: s.Metric.Copy(), - Value: s.V, + Value: s.F, }) } @@ -434,7 +434,7 @@ func (tg *testGroup) maxEvalTime() time.Duration { } func query(ctx context.Context, qs string, t time.Time, engine *promql.Engine, qu storage.Queryable) (promql.Vector, error) { - q, err := engine.NewInstantQuery(qu, nil, qs, t) + q, err := engine.NewInstantQuery(ctx, qu, nil, qs, t) if err != nil { return nil, err } @@ -447,7 +447,8 @@ func query(ctx context.Context, qs string, t time.Time, engine *promql.Engine, q return v, nil case promql.Scalar: return promql.Vector{promql.Sample{ - Point: promql.Point(v), + T: v.T, + F: v.V, Metric: labels.Labels{}, }}, nil default: diff --git a/config/config.go b/config/config.go index a13f397f81..d32fcc33c9 100644 --- a/config/config.go +++ b/config/config.go @@ -34,6 +34,7 @@ import ( "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/storage/remote/azuread" ) var ( @@ -80,7 +81,8 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro return cfg, nil } - for i, v := range cfg.GlobalConfig.ExternalLabels { + b := labels.ScratchBuilder{} + cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) { newV := os.Expand(v.Value, func(s string) string { if s == "$" { return "$" @@ -93,10 +95,10 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro }) if newV != v.Value { level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV) - v.Value = newV - cfg.GlobalConfig.ExternalLabels[i] = v } - } + b.Add(v.Name, newV) + }) + cfg.GlobalConfig.ExternalLabels = b.Labels() return cfg, nil } @@ -112,10 +114,6 @@ func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log. } if agentMode { - if len(cfg.RemoteWriteConfigs) == 0 { - return nil, errors.New("at least one remote_write target must be specified in agent mode") - } - if len(cfg.AlertingConfig.AlertmanagerConfigs) > 0 || len(cfg.AlertingConfig.AlertRelabelConfigs) > 0 { return nil, errors.New("field alerting is not allowed in agent mode") } @@ -149,13 +147,14 @@ var ( // DefaultScrapeConfig is the default scrape configuration. DefaultScrapeConfig = ScrapeConfig{ - // ScrapeTimeout and ScrapeInterval default to the - // configured globals. 
- MetricsPath: "/metrics", - Scheme: "http", - HonorLabels: false, - HonorTimestamps: true, - HTTPClientConfig: config.DefaultHTTPClientConfig, + // ScrapeTimeout and ScrapeInterval default to the configured + // globals. + ScrapeClassicHistograms: false, + MetricsPath: "/metrics", + Scheme: "http", + HonorLabels: false, + HonorTimestamps: true, + HTTPClientConfig: config.DefaultHTTPClientConfig, } // DefaultAlertmanagerConfig is the default alertmanager configuration. @@ -176,16 +175,16 @@ // DefaultQueueConfig is the default remote queue configuration. DefaultQueueConfig = QueueConfig{ - // With a maximum of 200 shards, assuming an average of 100ms remote write - // time and 500 samples per batch, we will be able to push 1M samples/s. - MaxShards: 200, + // With a maximum of 50 shards, assuming an average of 100ms remote write + // time and 2000 samples per batch, we will be able to push 1M samples/s. + MaxShards: 50, MinShards: 1, - MaxSamplesPerSend: 500, + MaxSamplesPerSend: 2000, - // Each shard will have a max of 2500 samples pending in its channel, plus the pending - // samples that have been enqueued. Theoretically we should only ever have about 3000 samples - // per shard pending. At 200 shards that's 600k. - Capacity: 2500, + // Each shard will have a max of 10,000 samples pending in its channel, plus the pending - // samples that have been enqueued. Theoretically we should only ever have about 12,000 samples + // per shard pending. At 50 shards that's 600k. + Capacity: 10000, BatchSendDeadline: model.Duration(5 * time.Second), // Backoff times for retrying a batch of samples on recoverable errors. @@ -197,7 +196,7 @@ DefaultMetadataConfig = MetadataConfig{ Send: true, SendInterval: model.Duration(1 * time.Minute), - MaxSamplesPerSend: 500, + MaxSamplesPerSend: 2000, } // DefaultRemoteReadConfig is the default remote read configuration. @@ -219,12 +218,13 @@ // Config is the top-level configuration for Prometheus's config files. type Config struct { - GlobalConfig GlobalConfig `yaml:"global"` - AlertingConfig AlertingConfig `yaml:"alerting,omitempty"` - RuleFiles []string `yaml:"rule_files,omitempty"` - ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"` - StorageConfig StorageConfig `yaml:"storage,omitempty"` - TracingConfig TracingConfig `yaml:"tracing,omitempty"` + GlobalConfig GlobalConfig `yaml:"global"` + AlertingConfig AlertingConfig `yaml:"alerting,omitempty"` + RuleFiles []string `yaml:"rule_files,omitempty"` + ScrapeConfigFiles []string `yaml:"scrape_config_files,omitempty"` + ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"` + StorageConfig StorageConfig `yaml:"storage,omitempty"` + TracingConfig TracingConfig `yaml:"tracing,omitempty"` RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"` RemoteReadConfigs []*RemoteReadConfig `yaml:"remote_read,omitempty"` @@ -238,6 +238,9 @@ func (c *Config) SetDirectory(dir string) { for i, file := range c.RuleFiles { c.RuleFiles[i] = config.JoinDir(dir, file) } + for i, file := range c.ScrapeConfigFiles { + c.ScrapeConfigFiles[i] = config.JoinDir(dir, file) + } for _, c := range c.ScrapeConfigs { c.SetDirectory(dir) } @@ -257,6 +260,58 @@ func (c Config) String() string { return string(b) } +// GetScrapeConfigs returns the scrape configurations, merging in any configurations read from scrape_config_files. 
+func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) { + scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs)) + + jobNames := map[string]string{} + for i, scfg := range c.ScrapeConfigs { + // We do these checks for library users that would not call Validate in + // Unmarshal. + if err := scfg.Validate(c.GlobalConfig); err != nil { + return nil, err + } + + if _, ok := jobNames[scfg.JobName]; ok { + return nil, fmt.Errorf("found multiple scrape configs with job name %q", scfg.JobName) + } + jobNames[scfg.JobName] = "main config file" + scfgs[i] = scfg + } + for _, pat := range c.ScrapeConfigFiles { + fs, err := filepath.Glob(pat) + if err != nil { + // The only error can be a bad pattern. + return nil, fmt.Errorf("error retrieving scrape config files for %q: %w", pat, err) + } + for _, filename := range fs { + cfg := ScrapeConfigs{} + content, err := os.ReadFile(filename) + if err != nil { + return nil, fileErr(filename, err) + } + err = yaml.UnmarshalStrict(content, &cfg) + if err != nil { + return nil, fileErr(filename, err) + } + for _, scfg := range cfg.ScrapeConfigs { + if err := scfg.Validate(c.GlobalConfig); err != nil { + return nil, fileErr(filename, err) + } + + if f, ok := jobNames[scfg.JobName]; ok { + return nil, fileErr(filename, fmt.Errorf("found multiple scrape configs with job name %q, first found in %s", scfg.JobName, f)) + } + jobNames[scfg.JobName] = fmt.Sprintf("%q", filePath(filename)) + + scfg.SetDirectory(filepath.Dir(filename)) + scfgs = append(scfgs, scfg) + } + } + } + return scfgs, nil +} + // UnmarshalYAML implements the yaml.Unmarshaler interface. func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { *c = DefaultConfig @@ -279,26 +334,18 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { return fmt.Errorf("invalid rule file path %q", rf) } } + + for _, sf := range c.ScrapeConfigFiles { + if !patRulePath.MatchString(sf) { + return fmt.Errorf("invalid scrape config file path %q", sf) + } + } + // Do global overrides and validate unique names. jobNames := map[string]struct{}{} for _, scfg := range c.ScrapeConfigs { - if scfg == nil { - return errors.New("empty or null scrape config section") - } - // First set the correct scrape interval, then check that the timeout - // (inferred or explicit) is not greater than that. - if scfg.ScrapeInterval == 0 { - scfg.ScrapeInterval = c.GlobalConfig.ScrapeInterval - } - if scfg.ScrapeTimeout > scfg.ScrapeInterval { - return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", scfg.JobName) - } - if scfg.ScrapeTimeout == 0 { - if c.GlobalConfig.ScrapeTimeout > scfg.ScrapeInterval { - scfg.ScrapeTimeout = scfg.ScrapeInterval - } else { - scfg.ScrapeTimeout = c.GlobalConfig.ScrapeTimeout - } + if err := scfg.Validate(c.GlobalConfig); err != nil { + return err } if _, ok := jobNames[scfg.JobName]; ok { @@ -344,6 +391,24 @@ type GlobalConfig struct { QueryLogFile string `yaml:"query_log_file,omitempty"` // The labels to add to any timeseries that this Prometheus instance scrapes. ExternalLabels labels.Labels `yaml:"external_labels,omitempty"` + // An uncompressed response body larger than this many bytes will cause the + // scrape to fail. 0 means no limit. + BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"` + // More than this many samples post metric-relabeling will cause the scrape to + // fail. 0 means no limit. 
+ SampleLimit uint `yaml:"sample_limit,omitempty"` + // More than this many targets after the target relabeling will cause the + // scrapes to fail. 0 means no limit. + TargetLimit uint `yaml:"target_limit,omitempty"` + // More than this many labels post metric-relabeling will cause the scrape to + // fail. 0 means no limit. + LabelLimit uint `yaml:"label_limit,omitempty"` + // More than this label name length post metric-relabeling will cause the + // scrape to fail. 0 means no limit. + LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"` + // More than this label value length post metric-relabeling will cause the + // scrape to fail. 0 means no limit. + LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"` } // SetDirectory joins any relative file paths with dir. @@ -361,13 +426,16 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } - for _, l := range gc.ExternalLabels { + if err := gc.ExternalLabels.Validate(func(l labels.Label) error { if !model.LabelName(l.Name).IsValid() { return fmt.Errorf("%q is not a valid label name", l.Name) } if !model.LabelValue(l.Value).IsValid() { return fmt.Errorf("%q is not a valid label value", l.Value) } + return nil + }); err != nil { + return err } // First set the correct scrape interval, then check that the timeout @@ -394,13 +462,17 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // isZero returns true iff the global config is the zero value. func (c *GlobalConfig) isZero() bool { - return c.ExternalLabels == nil && + return c.ExternalLabels.IsEmpty() && c.ScrapeInterval == 0 && c.ScrapeTimeout == 0 && c.EvaluationInterval == 0 && c.QueryLogFile == "" } +type ScrapeConfigs struct { + ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"` +} + // ScrapeConfig configures a scraping unit for Prometheus. type ScrapeConfig struct { // The job name to which the job label is set by default. @@ -415,6 +487,8 @@ type ScrapeConfig struct { ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"` // The timeout for scraping targets of this config. ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"` + // Whether to scrape a classic histogram that is also exposed as a native histogram. + ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"` // The HTTP resource path on which to fetch metrics from targets. MetricsPath string `yaml:"metrics_path,omitempty"` // The URL scheme with which to fetch metrics from targets. @@ -423,20 +497,23 @@ type ScrapeConfig struct { // scrape to fail. 0 means no limit. BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"` // More than this many samples post metric-relabeling will cause the scrape to - // fail. + // fail. 0 means no limit. SampleLimit uint `yaml:"sample_limit,omitempty"` // More than this many targets after the target relabeling will cause the - // scrapes to fail. + // scrapes to fail. 0 means no limit. TargetLimit uint `yaml:"target_limit,omitempty"` // More than this many labels post metric-relabeling will cause the scrape to - // fail. + // fail. 0 means no limit. LabelLimit uint `yaml:"label_limit,omitempty"` // More than this label name length post metric-relabeling will cause the - // scrape to fail. + // scrape to fail. 0 means no limit. LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"` // More than this label value length post metric-relabeling will cause the - // scrape to fail. + // scrape to fail. 0 means no limit. 
LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"` + // More than this many buckets in a native histogram will cause the scrape to + // fail. + NativeHistogramBucketLimit uint `yaml:"native_histogram_bucket_limit,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. @@ -494,6 +571,47 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return nil } +func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { + if c == nil { + return errors.New("empty or null scrape config section") + } + // First set the correct scrape interval, then check that the timeout + // (inferred or explicit) is not greater than that. + if c.ScrapeInterval == 0 { + c.ScrapeInterval = globalConfig.ScrapeInterval + } + if c.ScrapeTimeout > c.ScrapeInterval { + return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", c.JobName) + } + if c.ScrapeTimeout == 0 { + if globalConfig.ScrapeTimeout > c.ScrapeInterval { + c.ScrapeTimeout = c.ScrapeInterval + } else { + c.ScrapeTimeout = globalConfig.ScrapeTimeout + } + } + if c.BodySizeLimit == 0 { + c.BodySizeLimit = globalConfig.BodySizeLimit + } + if c.SampleLimit == 0 { + c.SampleLimit = globalConfig.SampleLimit + } + if c.TargetLimit == 0 { + c.TargetLimit = globalConfig.TargetLimit + } + if c.LabelLimit == 0 { + c.LabelLimit = globalConfig.LabelLimit + } + if c.LabelNameLengthLimit == 0 { + c.LabelNameLengthLimit = globalConfig.LabelNameLengthLimit + } + if c.LabelValueLengthLimit == 0 { + c.LabelValueLengthLimit = globalConfig.LabelValueLengthLimit + } + + return nil +} + // MarshalYAML implements the yaml.Marshaler interface. func (c *ScrapeConfig) MarshalYAML() (interface{}, error) { return discovery.MarshalYAMLWithInlineConfigs(c) @@ -776,12 +894,13 @@ func CheckTargetAddress(address model.LabelValue) error { // RemoteWriteConfig is the configuration for writing to remote storage. type RemoteWriteConfig struct { - URL *config.URL `yaml:"url"` - RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"` - Headers map[string]string `yaml:"headers,omitempty"` - WriteRelabelConfigs []*relabel.Config `yaml:"write_relabel_configs,omitempty"` - Name string `yaml:"name,omitempty"` - SendExemplars bool `yaml:"send_exemplars,omitempty"` + URL *config.URL `yaml:"url"` + RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"` + Headers map[string]string `yaml:"headers,omitempty"` + WriteRelabelConfigs []*relabel.Config `yaml:"write_relabel_configs,omitempty"` + Name string `yaml:"name,omitempty"` + SendExemplars bool `yaml:"send_exemplars,omitempty"` + SendNativeHistograms bool `yaml:"send_native_histograms,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. @@ -789,6 +908,7 @@ type RemoteWriteConfig struct { QueueConfig QueueConfig `yaml:"queue_config,omitempty"` MetadataConfig MetadataConfig `yaml:"metadata_config,omitempty"` SigV4Config *sigv4.SigV4Config `yaml:"sigv4,omitempty"` + AzureADConfig *azuread.AzureADConfig `yaml:"azuread,omitempty"` } // SetDirectory joins any relative file paths with dir. 
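Reviewer note: the new ScrapeConfig.Validate above is the crux of the limit plumbing — any zero-valued timing or limit on a job falls back to the corresponding GlobalConfig value, which is why both Config.UnmarshalYAML and GetScrapeConfigs can share it. A minimal sketch of that fallback, assuming only the exported config API shown in this diff (the job name and limit value are illustrative):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// Pretend the global block set sample_limit: 1500.
	global := config.DefaultGlobalConfig
	global.SampleLimit = 1500

	// A job with no per-job timings or limits configured.
	sc := &config.ScrapeConfig{JobName: "demo"}
	if err := sc.Validate(global); err != nil {
		panic(err)
	}

	// Zero-valued fields inherited the globals: 1m 10s 1500.
	fmt.Println(sc.ScrapeInterval, sc.ScrapeTimeout, sc.SampleLimit)
}
```

Validate mutates the receiver in place, matching how the old inline defaulting in UnmarshalYAML behaved.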
@@ -825,8 +945,12 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil || c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil - if httpClientConfigAuthEnabled && c.SigV4Config != nil { - return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured") + if httpClientConfigAuthEnabled && (c.SigV4Config != nil || c.AzureADConfig != nil) { + return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured") + } + + if c.SigV4Config != nil && c.AzureADConfig != nil { + return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured") } return nil @@ -847,7 +971,7 @@ func validateHeadersForTracing(headers map[string]string) error { func validateHeaders(headers map[string]string) error { for header := range headers { if strings.ToLower(header) == "authorization" { - return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter") + return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter") } if _, ok := reservedHeaders[strings.ToLower(header)]; ok { return fmt.Errorf("%s is a reserved header. It must not be changed", header) @@ -935,3 +1059,15 @@ func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(interface{}) error) erro // Thus we just do its validation here. return c.HTTPClientConfig.Validate() } + +func filePath(filename string) string { + absPath, err := filepath.Abs(filename) + if err != nil { + return filename + } + return absPath +} + +func fileErr(filename string, err error) error { + return fmt.Errorf("%q: %w", filePath(filename), err) +} diff --git a/config/config_test.go b/config/config_test.go index 9cab2f9e05..d3288cc90d 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -47,6 +47,7 @@ import ( "github.com/prometheus/prometheus/discovery/moby" "github.com/prometheus/prometheus/discovery/nomad" "github.com/prometheus/prometheus/discovery/openstack" + "github.com/prometheus/prometheus/discovery/ovhcloud" "github.com/prometheus/prometheus/discovery/puppetdb" "github.com/prometheus/prometheus/discovery/scaleway" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -67,6 +68,15 @@ func mustParseURL(u string) *config.URL { return &config.URL{URL: parsed} } +const ( + globBodySizeLimit = 15 * units.MiB + globSampleLimit = 1500 + globTargetLimit = 30 + globLabelLimit = 30 + globLabelNameLengthLimit = 200 + globLabelValueLengthLimit = 200 +) + var expectedConf = &Config{ GlobalConfig: GlobalConfig{ ScrapeInterval: model.Duration(15 * time.Second), @@ -75,6 +85,13 @@ var expectedConf = &Config{ QueryLogFile: "", ExternalLabels: labels.FromStrings("foo", "bar", "monitor", "codelab"), + + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, }, RuleFiles: []string{ @@ -164,10 +181,16 @@ var expectedConf = &Config{ { JobName: "prometheus", - HonorLabels: true, - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorLabels: true, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + 
BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -216,36 +239,59 @@ var expectedConf = &Config{ Regex: relabel.MustNewRegexp("(.*)some-[regex]"), Replacement: "foo-${1}", Action: relabel.Replace, - }, { + }, + { SourceLabels: model.LabelNames{"abc"}, TargetLabel: "cde", Separator: ";", Regex: relabel.DefaultRelabelConfig.Regex, Replacement: relabel.DefaultRelabelConfig.Replacement, Action: relabel.Replace, - }, { + }, + { TargetLabel: "abc", Separator: ";", Regex: relabel.DefaultRelabelConfig.Regex, Replacement: "static", Action: relabel.Replace, - }, { + }, + { TargetLabel: "abc", Separator: ";", Regex: relabel.MustNewRegexp(""), Replacement: "static", Action: relabel.Replace, }, + { + SourceLabels: model.LabelNames{"foo"}, + TargetLabel: "abc", + Action: relabel.KeepEqual, + Regex: relabel.DefaultRelabelConfig.Regex, + Replacement: relabel.DefaultRelabelConfig.Replacement, + Separator: relabel.DefaultRelabelConfig.Separator, + }, + { + SourceLabels: model.LabelNames{"foo"}, + TargetLabel: "abc", + Action: relabel.DropEqual, + Regex: relabel.DefaultRelabelConfig.Regex, + Replacement: relabel.DefaultRelabelConfig.Replacement, + Separator: relabel.DefaultRelabelConfig.Separator, + }, }, }, { JobName: "service-x", - HonorTimestamps: true, - ScrapeInterval: model.Duration(50 * time.Second), - ScrapeTimeout: model.Duration(5 * time.Second), - BodySizeLimit: 10 * units.MiB, - SampleLimit: 1000, + HonorTimestamps: true, + ScrapeInterval: model.Duration(50 * time.Second), + ScrapeTimeout: model.Duration(5 * time.Second), + BodySizeLimit: 10 * units.MiB, + SampleLimit: 1000, + TargetLimit: 35, + LabelLimit: 35, + LabelNameLengthLimit: 210, + LabelValueLengthLimit: 210, HTTPClientConfig: config.HTTPClientConfig{ BasicAuth: &config.BasicAuth{ @@ -332,9 +378,15 @@ var expectedConf = &Config{ { JobName: "service-y", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -343,6 +395,7 @@ var expectedConf = &Config{ ServiceDiscoveryConfigs: discovery.Configs{ &consul.SDConfig{ Server: "localhost:1234", + PathPrefix: "/consul", Token: "mysecret", Services: []string{"nginx", "cache", "mysql"}, ServiceTags: []string{"canary", "v1"}, @@ -378,9 +431,15 @@ var expectedConf = &Config{ { JobName: "service-z", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: model.Duration(10 * time.Second), + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: model.Duration(10 * time.Second), + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: "/metrics", Scheme: "http", @@ -403,9 
+462,15 @@ var expectedConf = &Config{ { JobName: "service-kubernetes", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -434,9 +499,15 @@ var expectedConf = &Config{ { JobName: "service-kubernetes-namespaces", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -465,9 +536,15 @@ var expectedConf = &Config{ { JobName: "service-kuma", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -485,9 +562,15 @@ var expectedConf = &Config{ { JobName: "service-marathon", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -514,9 +597,15 @@ var expectedConf = &Config{ { JobName: "service-nomad", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -540,9 +629,15 @@ var expectedConf = &Config{ { JobName: "service-ec2", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + 
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -566,15 +661,22 @@ var expectedConf = &Config{ Values: []string{"web", "db"}, }, }, + HTTPClientConfig: config.DefaultHTTPClientConfig, }, }, }, { JobName: "service-lightsail", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -582,21 +684,28 @@ var expectedConf = &Config{ ServiceDiscoveryConfigs: discovery.Configs{ &aws.LightsailSDConfig{ - Region: "us-east-1", - AccessKey: "access", - SecretKey: "mysecret", - Profile: "profile", - RefreshInterval: model.Duration(60 * time.Second), - Port: 80, + Region: "us-east-1", + AccessKey: "access", + SecretKey: "mysecret", + Profile: "profile", + RefreshInterval: model.Duration(60 * time.Second), + Port: 80, + HTTPClientConfig: config.DefaultHTTPClientConfig, }, }, }, { JobName: "service-azure", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -620,9 +729,15 @@ var expectedConf = &Config{ { JobName: "service-nerve", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -639,9 +754,15 @@ var expectedConf = &Config{ { JobName: "0123service-xxx", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -661,9 +782,15 @@ var expectedConf = 
&Config{ { JobName: "badfederation", - HonorTimestamps: false, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: false, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: "/federate", Scheme: DefaultScrapeConfig.Scheme, @@ -683,9 +810,15 @@ var expectedConf = &Config{ { JobName: "測試", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -705,9 +838,15 @@ var expectedConf = &Config{ { JobName: "httpsd", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -724,9 +863,15 @@ var expectedConf = &Config{ { JobName: "service-triton", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -751,9 +896,15 @@ var expectedConf = &Config{ { JobName: "digitalocean-droplets", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -777,9 +928,15 @@ var expectedConf = &Config{ { JobName: "docker", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + 
SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -799,9 +956,15 @@ var expectedConf = &Config{ { JobName: "dockerswarm", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -821,9 +984,15 @@ var expectedConf = &Config{ { JobName: "service-openstack", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -847,9 +1016,15 @@ var expectedConf = &Config{ { JobName: "service-puppetdb", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -875,10 +1050,16 @@ var expectedConf = &Config{ }, }, { - JobName: "hetzner", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + JobName: "hetzner", + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -924,9 +1105,15 @@ var expectedConf = &Config{ { JobName: "service-eureka", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, 
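Reviewer note: the repetitive expectedConf updates above all assert one behavior end to end — every job now carries the six global limits (body size, sample, target, label, label name length, label value length) unless it overrides them. A hedged round-trip sketch; the inline YAML and values are illustrative, while config.Load and the go-kit nop logger match how the tests in this diff drive the package:

```go
package main

import (
	"fmt"

	"github.com/go-kit/log"

	"github.com/prometheus/prometheus/config"
)

const cfgYAML = `
global:
  sample_limit: 1500
  label_limit: 30
scrape_configs:
  - job_name: demo
    sample_limit: 1000  # explicit per-job value wins over the global
    static_configs:
      - targets: ['localhost:8080']
`

func main() {
	c, err := config.Load(cfgYAML, false, log.NewNopLogger())
	if err != nil {
		panic(err)
	}
	sc := c.ScrapeConfigs[0]
	// Prints "1000 30": the unset label_limit fell back to the global value.
	fmt.Println(sc.SampleLimit, sc.LabelLimit)
}
```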
@@ -940,12 +1127,55 @@ var expectedConf = &Config{ }, }, }, + { + JobName: "ovhcloud", + + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + + HTTPClientConfig: config.DefaultHTTPClientConfig, + MetricsPath: DefaultScrapeConfig.MetricsPath, + Scheme: DefaultScrapeConfig.Scheme, + + ServiceDiscoveryConfigs: discovery.Configs{ + &ovhcloud.SDConfig{ + Endpoint: "ovh-eu", + ApplicationKey: "testAppKey", + ApplicationSecret: "testAppSecret", + ConsumerKey: "testConsumerKey", + RefreshInterval: model.Duration(60 * time.Second), + Service: "vps", + }, + &ovhcloud.SDConfig{ + Endpoint: "ovh-eu", + ApplicationKey: "testAppKey", + ApplicationSecret: "testAppSecret", + ConsumerKey: "testConsumerKey", + RefreshInterval: model.Duration(60 * time.Second), + Service: "dedicated_server", + }, + }, + }, { JobName: "scaleway", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + HTTPClientConfig: config.DefaultHTTPClientConfig, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -978,9 +1208,15 @@ var expectedConf = &Config{ { JobName: "linode-instances", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1005,9 +1241,16 @@ var expectedConf = &Config{ { JobName: "uyuni", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, + HTTPClientConfig: config.DefaultHTTPClientConfig, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1024,10 +1267,16 @@ var expectedConf = &Config{ }, }, { - JobName: "ionos", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + JobName: "ionos", + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: 
globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1049,9 +1298,15 @@ var expectedConf = &Config{ { JobName: "vultr", - HonorTimestamps: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + BodySizeLimit: globBodySizeLimit, + SampleLimit: globSampleLimit, + TargetLimit: globTargetLimit, + LabelLimit: globLabelLimit, + LabelNameLengthLimit: globLabelNameLengthLimit, + LabelValueLengthLimit: globLabelValueLengthLimit, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -1175,7 +1430,7 @@ func TestElideSecrets(t *testing.T) { yamlConfig := string(config) matches := secretRe.FindAllStringIndex(yamlConfig, -1) - require.Equal(t, 18, len(matches), "wrong number of secret matches found") + require.Equal(t, 22, len(matches), "wrong number of secret matches found") require.NotContains(t, yamlConfig, "mysecret", "yaml marshal reveals authentication credentials.") } @@ -1286,6 +1541,22 @@ var expectedErrors = []struct { filename: "labeldrop5.bad.yml", errMsg: "labeldrop action requires only 'regex', and no other fields", }, + { + filename: "dropequal.bad.yml", + errMsg: "relabel configuration for dropequal action requires 'target_label' value", + }, + { + filename: "dropequal1.bad.yml", + errMsg: "dropequal action requires only 'source_labels' and `target_label`, and no other fields", + }, + { + filename: "keepequal.bad.yml", + errMsg: "relabel configuration for keepequal action requires 'target_label' value", + }, + { + filename: "keepequal1.bad.yml", + errMsg: "keepequal action requires only 'source_labels' and `target_label`, and no other fields", + }, { filename: "labelmap.bad.yml", errMsg: "\"l-$1\" is invalid 'replacement' for labelmap action", @@ -1456,7 +1727,7 @@ var expectedErrors = []struct { }, { filename: "remote_write_authorization_header.bad.yml", - errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter`, + errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter`, }, { filename: "remote_write_url_missing.bad.yml", @@ -1618,6 +1889,18 @@ var expectedErrors = []struct { filename: "ionos_datacenter.bad.yml", errMsg: "datacenter id can't be empty", }, + { + filename: "ovhcloud_no_secret.bad.yml", + errMsg: "application secret can not be empty", + }, + { + filename: "ovhcloud_bad_service.bad.yml", + errMsg: "unknown service: fakeservice", + }, + { + filename: "scrape_config_files_glob.bad.yml", + errMsg: `parsing YAML file testdata/scrape_config_files_glob.bad.yml: invalid scrape config file path "scrape_configs/*/*"`, + }, } func TestBadConfigs(t *testing.T) { @@ -1670,6 +1953,33 @@ func TestExpandExternalLabels(t *testing.T) { require.Equal(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels) } +func TestAgentMode(t *testing.T) { + _, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, false, log.NewNopLogger()) + require.ErrorContains(t, err, "field alerting is not allowed in agent mode") + + _, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, 
false, log.NewNopLogger()) + require.ErrorContains(t, err, "field alerting is not allowed in agent mode") + + _, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, false, log.NewNopLogger()) + require.ErrorContains(t, err, "field rule_files is not allowed in agent mode") + + _, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, false, log.NewNopLogger()) + require.ErrorContains(t, err, "field remote_read is not allowed in agent mode") + + c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, false, log.NewNopLogger()) + require.NoError(t, err) + require.Len(t, c.RemoteWriteConfigs, 0) + + c, err = LoadFile("testdata/agent_mode.good.yml", true, false, log.NewNopLogger()) + require.NoError(t, err) + require.Len(t, c.RemoteWriteConfigs, 1) + require.Equal( + t, + "http://remote1/push", + c.RemoteWriteConfigs[0].URL.String(), + ) +} + func TestEmptyGlobalBlock(t *testing.T) { c, err := Load("global:\n", false, log.NewNopLogger()) require.NoError(t, err) @@ -1677,6 +1987,156 @@ func TestEmptyGlobalBlock(t *testing.T) { require.Equal(t, exp, *c) } +func TestGetScrapeConfigs(t *testing.T) { + sc := func(jobName string, scrapeInterval, scrapeTimeout model.Duration) *ScrapeConfig { + return &ScrapeConfig{ + JobName: jobName, + HonorTimestamps: true, + ScrapeInterval: scrapeInterval, + ScrapeTimeout: scrapeTimeout, + MetricsPath: "/metrics", + Scheme: "http", + HTTPClientConfig: config.DefaultHTTPClientConfig, + ServiceDiscoveryConfigs: discovery.Configs{ + discovery.StaticConfig{ + { + Targets: []model.LabelSet{ + { + model.AddressLabel: "localhost:8080", + }, + }, + Source: "0", + }, + }, + }, + } + } + + testCases := []struct { + name string + configFile string + expectedResult []*ScrapeConfig + expectedError string + }{ + { + name: "An included config file should be a valid global config.", + configFile: "testdata/scrape_config_files.good.yml", + expectedResult: []*ScrapeConfig{sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second))}, + }, + { + name: "An global config that only include a scrape config file.", + configFile: "testdata/scrape_config_files_only.good.yml", + expectedResult: []*ScrapeConfig{sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second))}, + }, + { + name: "An global config that combine scrape config files and scrape configs.", + configFile: "testdata/scrape_config_files_combined.good.yml", + expectedResult: []*ScrapeConfig{ + sc("node", model.Duration(60*time.Second), model.Duration(10*time.Second)), + sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second)), + sc("alertmanager", model.Duration(60*time.Second), model.Duration(10*time.Second)), + }, + }, + { + name: "An global config that includes a scrape config file with globs", + configFile: "testdata/scrape_config_files_glob.good.yml", + expectedResult: []*ScrapeConfig{ + { + JobName: "prometheus", + + HonorTimestamps: true, + ScrapeInterval: model.Duration(60 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + + MetricsPath: DefaultScrapeConfig.MetricsPath, + Scheme: DefaultScrapeConfig.Scheme, + + HTTPClientConfig: config.HTTPClientConfig{ + TLSConfig: config.TLSConfig{ + CertFile: filepath.FromSlash("testdata/scrape_configs/valid_cert_file"), + KeyFile: filepath.FromSlash("testdata/scrape_configs/valid_key_file"), + }, + FollowRedirects: true, + EnableHTTP2: true, + }, + + ServiceDiscoveryConfigs: discovery.Configs{ + discovery.StaticConfig{ + { + Targets: []model.LabelSet{ + 
{model.AddressLabel: "localhost:8080"}, + }, + Source: "0", + }, + }, + }, + }, + { + JobName: "node", + + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HTTPClientConfig: config.HTTPClientConfig{ + TLSConfig: config.TLSConfig{ + CertFile: filepath.FromSlash("testdata/valid_cert_file"), + KeyFile: filepath.FromSlash("testdata/valid_key_file"), + }, + FollowRedirects: true, + EnableHTTP2: true, + }, + + MetricsPath: DefaultScrapeConfig.MetricsPath, + Scheme: DefaultScrapeConfig.Scheme, + + ServiceDiscoveryConfigs: discovery.Configs{ + &vultr.SDConfig{ + HTTPClientConfig: config.HTTPClientConfig{ + Authorization: &config.Authorization{ + Type: "Bearer", + Credentials: "abcdef", + }, + FollowRedirects: true, + EnableHTTP2: true, + }, + Port: 80, + RefreshInterval: model.Duration(60 * time.Second), + }, + }, + }, + }, + }, + { + name: "An global config that includes twice the same scrape configs.", + configFile: "testdata/scrape_config_files_double_import.bad.yml", + expectedError: `found multiple scrape configs with job name "prometheus"`, + }, + { + name: "An global config that includes a scrape config identical to a scrape config in the main file.", + configFile: "testdata/scrape_config_files_duplicate.bad.yml", + expectedError: `found multiple scrape configs with job name "prometheus"`, + }, + { + name: "An global config that includes a scrape config file with errors.", + configFile: "testdata/scrape_config_files_global.bad.yml", + expectedError: `scrape timeout greater than scrape interval for scrape config with job name "prometheus"`, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + c, err := LoadFile(tc.configFile, false, false, log.NewNopLogger()) + require.NoError(t, err) + + scfgs, err := c.GetScrapeConfigs() + if len(tc.expectedError) > 0 { + require.ErrorContains(t, err, tc.expectedError) + } + require.Equal(t, tc.expectedResult, scfgs) + }) + } +} + func kubernetesSDHostURL() config.URL { tURL, _ := url.Parse("https://localhost:1234") return config.URL{URL: tURL} diff --git a/config/testdata/agent_mode.good.yml b/config/testdata/agent_mode.good.yml new file mode 100644 index 0000000000..a16612095c --- /dev/null +++ b/config/testdata/agent_mode.good.yml @@ -0,0 +1,2 @@ +remote_write: + - url: http://remote1/push diff --git a/config/testdata/agent_mode.with_alert_manager.yml b/config/testdata/agent_mode.with_alert_manager.yml new file mode 100644 index 0000000000..9a3929957f --- /dev/null +++ b/config/testdata/agent_mode.with_alert_manager.yml @@ -0,0 +1,6 @@ +alerting: + alertmanagers: + - scheme: https + static_configs: + - targets: + - "1.2.3.4:9093" diff --git a/config/testdata/agent_mode.with_alert_relabels.yml b/config/testdata/agent_mode.with_alert_relabels.yml new file mode 100644 index 0000000000..67e70fc7f4 --- /dev/null +++ b/config/testdata/agent_mode.with_alert_relabels.yml @@ -0,0 +1,5 @@ +alerting: + alert_relabel_configs: + - action: uppercase + source_labels: [instance] + target_label: instance diff --git a/config/testdata/agent_mode.with_remote_reads.yml b/config/testdata/agent_mode.with_remote_reads.yml new file mode 100644 index 0000000000..416676793b --- /dev/null +++ b/config/testdata/agent_mode.with_remote_reads.yml @@ -0,0 +1,5 @@ +remote_read: + - url: http://remote1/read + read_recent: true + name: default + enable_http2: false diff --git a/config/testdata/agent_mode.with_rule_files.yml b/config/testdata/agent_mode.with_rule_files.yml new 
file mode 100644 index 0000000000..3aaa9ad471 --- /dev/null +++ b/config/testdata/agent_mode.with_rule_files.yml @@ -0,0 +1,3 @@ +rule_files: + - "first.rules" + - "my/*.rules" diff --git a/config/testdata/agent_mode.without_remote_writes.yml b/config/testdata/agent_mode.without_remote_writes.yml new file mode 100644 index 0000000000..1285bef674 --- /dev/null +++ b/config/testdata/agent_mode.without_remote_writes.yml @@ -0,0 +1,2 @@ +global: + scrape_interval: 15s diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index c19b7c1e6d..19cfe1eb5d 100644 --- a/config/testdata/conf.good.yml +++ b/config/testdata/conf.good.yml @@ -2,6 +2,12 @@ global: scrape_interval: 15s evaluation_interval: 30s + body_size_limit: 15MB + sample_limit: 1500 + target_limit: 30 + label_limit: 30 + label_name_length_limit: 200 + label_value_length_limit: 200 # scrape_timeout is set to the global default (10s). external_labels: @@ -87,6 +93,12 @@ scrape_configs: - regex: replacement: static target_label: abc + - source_labels: [foo] + target_label: abc + action: keepequal + - source_labels: [foo] + target_label: abc + action: dropequal authorization: credentials_file: valid_token_file @@ -105,6 +117,11 @@ scrape_configs: body_size_limit: 10MB sample_limit: 1000 + target_limit: 35 + label_limit: 35 + label_name_length_limit: 210 + label_value_length_limit: 210 + metrics_path: /my_path scheme: https @@ -145,6 +162,7 @@ scrape_configs: consul_sd_configs: - server: "localhost:1234" token: mysecret + path_prefix: /consul services: ["nginx", "cache", "mysql"] tags: ["canary", "v1"] node_meta: @@ -349,6 +367,21 @@ scrape_configs: eureka_sd_configs: - server: "http://eureka.example.com:8761/eureka" + - job_name: ovhcloud + ovhcloud_sd_configs: + - service: vps + endpoint: ovh-eu + application_key: testAppKey + application_secret: testAppSecret + consumer_key: testConsumerKey + refresh_interval: 1m + - service: dedicated_server + endpoint: ovh-eu + application_key: testAppKey + application_secret: testAppSecret + consumer_key: testConsumerKey + refresh_interval: 1m + - job_name: scaleway scaleway_sd_configs: - role: instance diff --git a/config/testdata/dropequal.bad.yml b/config/testdata/dropequal.bad.yml new file mode 100644 index 0000000000..32bd1a9641 --- /dev/null +++ b/config/testdata/dropequal.bad.yml @@ -0,0 +1,5 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - source_labels: [abcdef] + action: dropequal diff --git a/config/testdata/dropequal1.bad.yml b/config/testdata/dropequal1.bad.yml new file mode 100644 index 0000000000..b648c0db26 --- /dev/null +++ b/config/testdata/dropequal1.bad.yml @@ -0,0 +1,7 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - source_labels: [abcdef] + action: dropequal + regex: foo + target_label: bar diff --git a/config/testdata/keepequal.bad.yml b/config/testdata/keepequal.bad.yml new file mode 100644 index 0000000000..5f5662177a --- /dev/null +++ b/config/testdata/keepequal.bad.yml @@ -0,0 +1,5 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - source_labels: [abcdef] + action: keepequal diff --git a/config/testdata/keepequal1.bad.yml b/config/testdata/keepequal1.bad.yml new file mode 100644 index 0000000000..c4628314c6 --- /dev/null +++ b/config/testdata/keepequal1.bad.yml @@ -0,0 +1,7 @@ +scrape_configs: + - job_name: prometheus + relabel_configs: + - source_labels: [abcdef] + action: keepequal + regex: foo + target_label: bar diff --git a/config/testdata/ovhcloud_bad_service.bad.yml 
b/config/testdata/ovhcloud_bad_service.bad.yml new file mode 100644 index 0000000000..3c2fa03cc8 --- /dev/null +++ b/config/testdata/ovhcloud_bad_service.bad.yml @@ -0,0 +1,8 @@ +scrape_configs: + - ovhcloud_sd_configs: + - service: fakeservice + endpoint: ovh-eu + application_key: testAppKey + application_secret: testAppSecret + consumer_key: testConsumerKey + refresh_interval: 1m diff --git a/config/testdata/ovhcloud_no_secret.bad.yml b/config/testdata/ovhcloud_no_secret.bad.yml new file mode 100644 index 0000000000..1959bee870 --- /dev/null +++ b/config/testdata/ovhcloud_no_secret.bad.yml @@ -0,0 +1,7 @@ +scrape_configs: + - ovhcloud_sd_configs: + - service: dedicated_server + endpoint: ovh-eu + application_key: testAppKey + consumer_key: testConsumerKey + refresh_interval: 1m diff --git a/config/testdata/scrape_config_files.bad.yml b/config/testdata/scrape_config_files.bad.yml new file mode 100644 index 0000000000..1b7690899b --- /dev/null +++ b/config/testdata/scrape_config_files.bad.yml @@ -0,0 +1,6 @@ +scrape_configs: + - job_name: prometheus + scrape_interval: 10s + scrape_timeout: 20s + static_configs: + - targets: ['localhost:8080'] diff --git a/config/testdata/scrape_config_files.good.yml b/config/testdata/scrape_config_files.good.yml new file mode 100644 index 0000000000..5a0c2df9d6 --- /dev/null +++ b/config/testdata/scrape_config_files.good.yml @@ -0,0 +1,4 @@ +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ['localhost:8080'] diff --git a/config/testdata/scrape_config_files2.good.yml b/config/testdata/scrape_config_files2.good.yml new file mode 100644 index 0000000000..f10bdf06ee --- /dev/null +++ b/config/testdata/scrape_config_files2.good.yml @@ -0,0 +1,4 @@ +scrape_configs: + - job_name: alertmanager + static_configs: + - targets: ['localhost:8080'] diff --git a/config/testdata/scrape_config_files_combined.good.yml b/config/testdata/scrape_config_files_combined.good.yml new file mode 100644 index 0000000000..ee3b0909c1 --- /dev/null +++ b/config/testdata/scrape_config_files_combined.good.yml @@ -0,0 +1,7 @@ +scrape_config_files: + - scrape_config_files.good.yml + - scrape_config_files2.good.yml +scrape_configs: + - job_name: node + static_configs: + - targets: ['localhost:8080'] diff --git a/config/testdata/scrape_config_files_double_import.bad.yml b/config/testdata/scrape_config_files_double_import.bad.yml new file mode 100644 index 0000000000..b153d7f4bd --- /dev/null +++ b/config/testdata/scrape_config_files_double_import.bad.yml @@ -0,0 +1,3 @@ +scrape_config_files: + - scrape_config_files.good.yml + - scrape_config_files.good.yml diff --git a/config/testdata/scrape_config_files_duplicate.bad.yml b/config/testdata/scrape_config_files_duplicate.bad.yml new file mode 100644 index 0000000000..ceac36eaa8 --- /dev/null +++ b/config/testdata/scrape_config_files_duplicate.bad.yml @@ -0,0 +1,6 @@ +scrape_config_files: + - scrape_config_files.good.yml +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ['localhost:8080'] diff --git a/config/testdata/scrape_config_files_glob.bad.yml b/config/testdata/scrape_config_files_glob.bad.yml new file mode 100644 index 0000000000..f3cd636764 --- /dev/null +++ b/config/testdata/scrape_config_files_glob.bad.yml @@ -0,0 +1,6 @@ +scrape_config_files: + - scrape_configs/*/* +scrape_configs: + - job_name: node + static_configs: + - targets: ['localhost:8080'] diff --git a/config/testdata/scrape_config_files_glob.good.yml b/config/testdata/scrape_config_files_glob.good.yml new file mode 100644 index 
0000000000..d724045030 --- /dev/null +++ b/config/testdata/scrape_config_files_glob.good.yml @@ -0,0 +1,2 @@ +scrape_config_files: + - scrape_configs/*.yml diff --git a/config/testdata/scrape_config_files_global.bad.yml b/config/testdata/scrape_config_files_global.bad.yml new file mode 100644 index 0000000000..b825fa62c8 --- /dev/null +++ b/config/testdata/scrape_config_files_global.bad.yml @@ -0,0 +1,2 @@ +scrape_config_files: + - scrape_config_files.bad.yml diff --git a/config/testdata/scrape_config_files_global_duplicate.bad.yml b/config/testdata/scrape_config_files_global_duplicate.bad.yml new file mode 100644 index 0000000000..b6ec1a7fdb --- /dev/null +++ b/config/testdata/scrape_config_files_global_duplicate.bad.yml @@ -0,0 +1,11 @@ +global: + scrape_interval: 15s + +scrape_config_files: + - scrape_config_files.good.yml + - scrape_config_files.good.yml + +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ['localhost:8080'] diff --git a/config/testdata/scrape_config_files_only.good.yml b/config/testdata/scrape_config_files_only.good.yml new file mode 100644 index 0000000000..9f2711d51f --- /dev/null +++ b/config/testdata/scrape_config_files_only.good.yml @@ -0,0 +1,2 @@ +scrape_config_files: + - scrape_config_files.good.yml diff --git a/config/testdata/scrape_configs/scrape_config_files1.good.yml b/config/testdata/scrape_configs/scrape_config_files1.good.yml new file mode 100644 index 0000000000..b0a05c532e --- /dev/null +++ b/config/testdata/scrape_configs/scrape_config_files1.good.yml @@ -0,0 +1,7 @@ +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ['localhost:8080'] + tls_config: + cert_file: valid_cert_file + key_file: valid_key_file diff --git a/config/testdata/scrape_configs/scrape_config_files2.good.yml b/config/testdata/scrape_configs/scrape_config_files2.good.yml new file mode 100644 index 0000000000..468d022965 --- /dev/null +++ b/config/testdata/scrape_configs/scrape_config_files2.good.yml @@ -0,0 +1,9 @@ +scrape_configs: + - job_name: node + scrape_interval: 15s + tls_config: + cert_file: ../valid_cert_file + key_file: ../valid_key_file + vultr_sd_configs: + - authorization: + credentials: abcdef diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go index 7519f58da1..86d76627e1 100644 --- a/discovery/aws/ec2.go +++ b/discovery/aws/ec2.go @@ -66,8 +66,9 @@ const ( // DefaultEC2SDConfig is the default EC2 SD configuration. var DefaultEC2SDConfig = EC2SDConfig{ - Port: 80, - RefreshInterval: model.Duration(60 * time.Second), + Port: 80, + RefreshInterval: model.Duration(60 * time.Second), + HTTPClientConfig: config.DefaultHTTPClientConfig, } func init() { @@ -91,6 +92,8 @@ type EC2SDConfig struct { RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` Port int `yaml:"port"` Filters []*EC2Filter `yaml:"filters"` + + HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` } // Name returns the name of the EC2 Config. 
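Reviewer note: because HTTPClientConfig is now inlined into EC2SDConfig (and into LightsailSDConfig below), standard HTTP client options parse inside ec2_sd_configs. A sketch of the parse path; the proxy host and port are illustrative, and the UnmarshalStrict round-trip is an assumption mirroring how the config package decodes these blocks:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"

	"github.com/prometheus/prometheus/discovery/aws"
)

// proxy_url only parses here because HTTPClientConfig is inlined now.
const sd = `
region: us-east-1
port: 9100
proxy_url: http://proxy.internal:3128
`

func main() {
	cfg := aws.DefaultEC2SDConfig
	if err := yaml.UnmarshalStrict([]byte(sd), &cfg); err != nil {
		panic(err) // before this change: unknown field "proxy_url"
	}
	fmt.Println(cfg.Region, cfg.Port) // us-east-1 9100
}
```

The resulting client is then handed to the AWS session through aws.Config.HTTPClient, as the ec2Client hunk below shows.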
@@ -161,7 +164,7 @@ func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger) *EC2Discovery { return d } -func (d *EC2Discovery) ec2Client(ctx context.Context) (*ec2.EC2, error) { +func (d *EC2Discovery) ec2Client(context.Context) (*ec2.EC2, error) { if d.ec2 != nil { return d.ec2, nil } @@ -171,11 +174,17 @@ func (d *EC2Discovery) ec2Client(ctx context.Context) (*ec2.EC2, error) { creds = nil } + client, err := config.NewClientFromConfig(d.cfg.HTTPClientConfig, "ec2_sd") + if err != nil { + return nil, err + } + sess, err := session.NewSessionWithOptions(session.Options{ Config: aws.Config{ Endpoint: &d.cfg.Endpoint, Region: &d.cfg.Region, Credentials: creds, + HTTPClient: client, }, Profile: d.cfg.Profile, }) diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go index 016d78a67f..e671769ca3 100644 --- a/discovery/aws/lightsail.go +++ b/discovery/aws/lightsail.go @@ -56,8 +56,9 @@ const ( // DefaultLightsailSDConfig is the default Lightsail SD configuration. var DefaultLightsailSDConfig = LightsailSDConfig{ - Port: 80, - RefreshInterval: model.Duration(60 * time.Second), + Port: 80, + RefreshInterval: model.Duration(60 * time.Second), + HTTPClientConfig: config.DefaultHTTPClientConfig, } func init() { @@ -74,6 +75,8 @@ type LightsailSDConfig struct { RoleARN string `yaml:"role_arn,omitempty"` RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` Port int `yaml:"port"` + + HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` } // Name returns the name of the Lightsail Config. @@ -144,11 +147,17 @@ func (d *LightsailDiscovery) lightsailClient() (*lightsail.Lightsail, error) { creds = nil } + client, err := config.NewClientFromConfig(d.cfg.HTTPClientConfig, "lightsail_sd") + if err != nil { + return nil, err + } + sess, err := session.NewSessionWithOptions(session.Options{ Config: aws.Config{ Endpoint: &d.cfg.Endpoint, Region: &d.cfg.Region, Credentials: creds, + HTTPClient: client, }, Profile: d.cfg.Profile, }) diff --git a/discovery/azure/azure.go b/discovery/azure/azure.go index 44576c79b9..098fbb4c5f 100644 --- a/discovery/azure/azure.go +++ b/discovery/azure/azure.go @@ -55,6 +55,7 @@ const ( azureLabelMachinePublicIP = azureLabel + "machine_public_ip" azureLabelMachineTag = azureLabel + "machine_tag_" azureLabelMachineScaleSet = azureLabel + "machine_scale_set" + azureLabelMachineSize = azureLabel + "machine_size" authMethodOAuth = "OAuth" authMethodManagedIdentity = "ManagedIdentity" @@ -261,6 +262,7 @@ type virtualMachine struct { ScaleSet string Tags map[string]*string NetworkInterfaces []string + Size string } // Create a new azureResource object from an ID string. 
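Reviewer note: the new azureLabelMachineSize meta label lands on targets as __meta_azure_machine_size, so it can be used in relabeling like any other SD label. A hypothetical rule (the target label name and this sketch are illustrative, not part of the change):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"

	"github.com/prometheus/prometheus/model/relabel"
)

// Copy the VM size (e.g. "Standard_D8s_v3") into a plain target label.
const rule = `
- source_labels: [__meta_azure_machine_size]
  target_label: machine_size
`

func main() {
	var cfgs []*relabel.Config
	if err := yaml.UnmarshalStrict([]byte(rule), &cfgs); err != nil {
		panic(err)
	}
	// Unset fields are filled from DefaultRelabelConfig: prints "replace $1".
	fmt.Println(cfgs[0].Action, cfgs[0].Replacement)
}
```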
@@ -343,6 +345,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { azureLabelMachineOSType: model.LabelValue(vm.OsType), azureLabelMachineLocation: model.LabelValue(vm.Location), azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroup), + azureLabelMachineSize: model.LabelValue(vm.Size), } if vm.ScaleSet != "" { @@ -514,6 +517,7 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine { tags := map[string]*string{} networkInterfaces := []string{} var computerName string + var size string if vm.Tags != nil { tags = vm.Tags @@ -525,10 +529,13 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine { } } - if vm.VirtualMachineProperties != nil && - vm.VirtualMachineProperties.OsProfile != nil && - vm.VirtualMachineProperties.OsProfile.ComputerName != nil { - computerName = *(vm.VirtualMachineProperties.OsProfile.ComputerName) + if vm.VirtualMachineProperties != nil { + if vm.VirtualMachineProperties.OsProfile != nil && vm.VirtualMachineProperties.OsProfile.ComputerName != nil { + computerName = *(vm.VirtualMachineProperties.OsProfile.ComputerName) + } + if vm.VirtualMachineProperties.HardwareProfile != nil { + size = string(vm.VirtualMachineProperties.HardwareProfile.VMSize) + } } return virtualMachine{ @@ -541,6 +548,7 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine { ScaleSet: "", Tags: tags, NetworkInterfaces: networkInterfaces, + Size: size, } } @@ -549,6 +557,7 @@ func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName strin tags := map[string]*string{} networkInterfaces := []string{} var computerName string + var size string if vm.Tags != nil { tags = vm.Tags @@ -560,8 +569,13 @@ func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName strin } } - if vm.VirtualMachineScaleSetVMProperties != nil && vm.VirtualMachineScaleSetVMProperties.OsProfile != nil { - computerName = *(vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName) + if vm.VirtualMachineScaleSetVMProperties != nil { + if vm.VirtualMachineScaleSetVMProperties.OsProfile != nil && vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName != nil { + computerName = *(vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName) + } + if vm.VirtualMachineScaleSetVMProperties.HardwareProfile != nil { + size = string(vm.VirtualMachineScaleSetVMProperties.HardwareProfile.VMSize) + } } return virtualMachine{ @@ -574,6 +588,7 @@ func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName strin ScaleSet: scaleSetName, Tags: tags, NetworkInterfaces: networkInterfaces, + Size: size, } } diff --git a/discovery/azure/azure_test.go b/discovery/azure/azure_test.go index 744d182de7..179b97ba61 100644 --- a/discovery/azure/azure_test.go +++ b/discovery/azure/azure_test.go @@ -28,6 +28,7 @@ func TestMain(m *testing.M) { func TestMapFromVMWithEmptyTags(t *testing.T) { id := "test" name := "name" + size := "size" vmType := "type" location := "westeurope" computerName := "computer_name" @@ -44,6 +45,9 @@ func TestMapFromVMWithEmptyTags(t *testing.T) { }, }, NetworkProfile: &networkProfile, + HardwareProfile: &compute.HardwareProfile{ + VMSize: compute.VirtualMachineSizeTypes(size), + }, } testVM := compute.VirtualMachine{ @@ -64,6 +68,7 @@ func TestMapFromVMWithEmptyTags(t *testing.T) { OsType: "Linux", Tags: map[string]*string{}, NetworkInterfaces: []string{}, + Size: size, } actualVM := mapFromVM(testVM) @@ -74,6 +79,7 @@ func TestMapFromVMWithEmptyTags(t *testing.T) { func TestMapFromVMWithTags(t *testing.T) { id := "test" 
name := "name" + size := "size" vmType := "type" location := "westeurope" computerName := "computer_name" @@ -93,6 +99,9 @@ func TestMapFromVMWithTags(t *testing.T) { }, }, NetworkProfile: &networkProfile, + HardwareProfile: &compute.HardwareProfile{ + VMSize: compute.VirtualMachineSizeTypes(size), + }, } testVM := compute.VirtualMachine{ @@ -113,6 +122,7 @@ func TestMapFromVMWithTags(t *testing.T) { OsType: "Linux", Tags: tags, NetworkInterfaces: []string{}, + Size: size, } actualVM := mapFromVM(testVM) @@ -123,6 +133,7 @@ func TestMapFromVMWithTags(t *testing.T) { func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) { id := "test" name := "name" + size := "size" vmType := "type" location := "westeurope" computerName := "computer_name" @@ -139,6 +150,9 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) { }, }, NetworkProfile: &networkProfile, + HardwareProfile: &compute.HardwareProfile{ + VMSize: compute.VirtualMachineSizeTypes(size), + }, } testVM := compute.VirtualMachineScaleSetVM{ @@ -161,6 +175,7 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) { Tags: map[string]*string{}, NetworkInterfaces: []string{}, ScaleSet: scaleSet, + Size: size, } actualVM := mapFromVMScaleSetVM(testVM, scaleSet) @@ -171,6 +186,7 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) { func TestMapFromVMScaleSetVMWithTags(t *testing.T) { id := "test" name := "name" + size := "size" vmType := "type" location := "westeurope" computerName := "computer_name" @@ -190,6 +206,9 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) { }, }, NetworkProfile: &networkProfile, + HardwareProfile: &compute.HardwareProfile{ + VMSize: compute.VirtualMachineSizeTypes(size), + }, } testVM := compute.VirtualMachineScaleSetVM{ @@ -212,6 +231,7 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) { Tags: tags, NetworkInterfaces: []string{}, ScaleSet: scaleSet, + Size: size, } actualVM := mapFromVMScaleSetVM(testVM, scaleSet) diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go index 8d0a1fea48..99ea396b99 100644 --- a/discovery/consul/consul.go +++ b/discovery/consul/consul.go @@ -60,6 +60,8 @@ const ( datacenterLabel = model.MetaLabelPrefix + "consul_dc" // namespaceLabel is the name of the label containing the namespace (Consul Enterprise only). namespaceLabel = model.MetaLabelPrefix + "consul_namespace" + // partitionLabel is the name of the label containing the Admin Partition (Consul Enterprise only). + partitionLabel = model.MetaLabelPrefix + "consul_partition" // taggedAddressesLabel is the prefix for the labels mapping to a target's tagged addresses. taggedAddressesLabel = model.MetaLabelPrefix + "consul_tagged_address_" // serviceIDLabel is the name of the label containing the service ID. @@ -109,9 +111,11 @@ func init() { // SDConfig is the configuration for Consul service discovery. type SDConfig struct { Server string `yaml:"server,omitempty"` + PathPrefix string `yaml:"path_prefix,omitempty"` Token config.Secret `yaml:"token,omitempty"` Datacenter string `yaml:"datacenter,omitempty"` Namespace string `yaml:"namespace,omitempty"` + Partition string `yaml:"partition,omitempty"` TagSeparator string `yaml:"tag_separator,omitempty"` Scheme string `yaml:"scheme,omitempty"` Username string `yaml:"username,omitempty"` @@ -183,6 +187,7 @@ type Discovery struct { client *consul.Client clientDatacenter string clientNamespace string + clientPartition string tagSeparator string watchedServices []string // Set of services which will be discovered. 
watchedTags []string // Tags used to filter instances of a service. @@ -207,9 +212,11 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { clientConf := &consul.Config{ Address: conf.Server, + PathPrefix: conf.PathPrefix, Scheme: conf.Scheme, Datacenter: conf.Datacenter, Namespace: conf.Namespace, + Partition: conf.Partition, Token: string(conf.Token), HttpClient: wrapper, } @@ -227,6 +234,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { refreshInterval: time.Duration(conf.RefreshInterval), clientDatacenter: conf.Datacenter, clientNamespace: conf.Namespace, + clientPartition: conf.Partition, finalizer: wrapper.CloseIdleConnections, logger: logger, } @@ -547,6 +555,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr addressLabel: model.LabelValue(serviceNode.Node.Address), nodeLabel: model.LabelValue(serviceNode.Node.Node), namespaceLabel: model.LabelValue(serviceNode.Service.Namespace), + partitionLabel: model.LabelValue(serviceNode.Service.Partition), tagsLabel: model.LabelValue(tags), serviceAddressLabel: model.LabelValue(serviceNode.Service.Address), servicePortLabel: model.LabelValue(strconv.Itoa(serviceNode.Service.Port)), diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go index 9dc2d660bb..c929601638 100644 --- a/discovery/consul/consul_test.go +++ b/discovery/consul/consul_test.go @@ -365,7 +365,7 @@ func TestGetDatacenterShouldReturnError(t *testing.T) { { // Define a handler that will return status 500. handler: func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(500) + w.WriteHeader(http.StatusInternalServerError) }, errMessage: "Unexpected response code: 500 ()", }, diff --git a/discovery/dns/dns.go b/discovery/dns/dns.go index 2b11c242ab..96e07254f0 100644 --- a/discovery/dns/dns.go +++ b/discovery/dns/dns.go @@ -285,21 +285,22 @@ func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Ms for _, lname := range conf.NameList(name) { response, err := lookupFromAnyServer(lname, qtype, conf, logger) - if err != nil { + switch { + case err != nil: // We can't go home yet, because a later name // may give us a valid, successful answer. However // we can no longer say "this name definitely doesn't // exist", because we did not get that answer for // at least one name. allResponsesValid = false - } else if response.Rcode == dns.RcodeSuccess { + case response.Rcode == dns.RcodeSuccess: // Outcome 1: GOLD! return response, nil } } if allResponsesValid { - // Outcome 2: everyone says NXDOMAIN, that's good enough for me + // Outcome 2: everyone says NXDOMAIN, that's good enough for me. return &dns.Msg{}, nil } // Outcome 3: boned. 
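The file.go hunks below reorder the metric declarations and add prometheus_sd_file_watcher_errors_total, incremented when the fsnotify watcher cannot be created, so watch failures are counted instead of only logged. A minimal sketch of that count-on-failure pattern under assumed metric and package names:

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
	"github.com/prometheus/client_golang/prometheus"
)

// Hypothetical counter mirroring fileWatcherErrorsCount in the hunk below.
var watcherErrors = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "example_sd_file_watcher_errors_total",
	Help: "The number of file-SD errors caused by filesystem watch failures.",
})

func main() {
	prometheus.MustRegister(watcherErrors)
	w, err := fsnotify.NewWatcher()
	if err != nil {
		// Count the failure before bailing out, as the patched Run() does.
		watcherErrors.Inc()
		log.Fatalln("Error adding file watcher:", err)
	}
	defer w.Close()
}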
diff --git a/discovery/file/file.go b/discovery/file/file.go index 5aa7f2e5f4..60b63350f5 100644 --- a/discovery/file/file.go +++ b/discovery/file/file.go @@ -39,18 +39,23 @@ import ( ) var ( + fileSDReadErrorsCount = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_sd_file_read_errors_total", + Help: "The number of File-SD read errors.", + }) fileSDScanDuration = prometheus.NewSummary( prometheus.SummaryOpts{ Name: "prometheus_sd_file_scan_duration_seconds", Help: "The duration of the File-SD scan in seconds.", Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }) - fileSDReadErrorsCount = prometheus.NewCounter( + fileSDTimeStamp = NewTimestampCollector() + fileWatcherErrorsCount = prometheus.NewCounter( prometheus.CounterOpts{ - Name: "prometheus_sd_file_read_errors_total", - Help: "The number of File-SD read errors.", + Name: "prometheus_sd_file_watcher_errors_total", + Help: "The number of File-SD errors caused by filesystem watch failures.", }) - fileSDTimeStamp = NewTimestampCollector() patFileSDName = regexp.MustCompile(`^[^*]*(\*[^/]*)?\.(json|yml|yaml|JSON|YML|YAML)$`) @@ -62,7 +67,7 @@ var ( func init() { discovery.RegisterConfig(&SDConfig{}) - prometheus.MustRegister(fileSDScanDuration, fileSDReadErrorsCount, fileSDTimeStamp) + prometheus.MustRegister(fileSDReadErrorsCount, fileSDScanDuration, fileSDTimeStamp, fileWatcherErrorsCount) } // SDConfig is the configuration for file based discovery. @@ -221,8 +226,8 @@ func (d *Discovery) watchFiles() { panic("no watcher configured") } for _, p := range d.paths { - if idx := strings.LastIndex(p, "/"); idx > -1 { - p = p[:idx] + if dir, _ := filepath.Split(p); dir != "" { + p = dir } else { p = "./" } @@ -237,6 +242,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { watcher, err := fsnotify.NewWatcher() if err != nil { level.Error(d.logger).Log("msg", "Error adding file watcher", "err", err) + fileWatcherErrorsCount.Inc() return } d.watcher = watcher diff --git a/discovery/hetzner/hcloud.go b/discovery/hetzner/hcloud.go index aa406a1a7a..50afdc1ec3 100644 --- a/discovery/hetzner/hcloud.go +++ b/discovery/hetzner/hcloud.go @@ -59,7 +59,7 @@ type hcloudDiscovery struct { } // newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets. -func newHcloudDiscovery(conf *SDConfig, logger log.Logger) (*hcloudDiscovery, error) { +func newHcloudDiscovery(conf *SDConfig, _ log.Logger) (*hcloudDiscovery, error) { d := &hcloudDiscovery{ port: conf.Port, } diff --git a/discovery/hetzner/robot.go b/discovery/hetzner/robot.go index 4b7abaf77f..4960880289 100644 --- a/discovery/hetzner/robot.go +++ b/discovery/hetzner/robot.go @@ -51,7 +51,7 @@ type robotDiscovery struct { } // newRobotDiscovery returns a new robotDiscovery which periodically refreshes its targets. 
-func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, error) { +func newRobotDiscovery(conf *SDConfig, _ log.Logger) (*robotDiscovery, error) { d := &robotDiscovery{ port: conf.Port, endpoint: conf.robotEndpoint, @@ -69,7 +69,7 @@ func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, erro return d, nil } -func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { +func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) { req, err := http.NewRequest("GET", d.endpoint+"/server", nil) if err != nil { return nil, err diff --git a/discovery/install/install.go b/discovery/install/install.go index 36e13948d3..f090076b7f 100644 --- a/discovery/install/install.go +++ b/discovery/install/install.go @@ -33,6 +33,7 @@ import ( _ "github.com/prometheus/prometheus/discovery/moby" // register moby _ "github.com/prometheus/prometheus/discovery/nomad" // register nomad _ "github.com/prometheus/prometheus/discovery/openstack" // register openstack + _ "github.com/prometheus/prometheus/discovery/ovhcloud" // register ovhcloud _ "github.com/prometheus/prometheus/discovery/puppetdb" // register puppetdb _ "github.com/prometheus/prometheus/discovery/scaleway" // register scaleway _ "github.com/prometheus/prometheus/discovery/triton" // register triton diff --git a/discovery/ionos/server.go b/discovery/ionos/server.go index 8ac3639705..a850fbbfb4 100644 --- a/discovery/ionos/server.go +++ b/discovery/ionos/server.go @@ -60,7 +60,7 @@ type serverDiscovery struct { datacenterID string } -func newServerDiscovery(conf *SDConfig, logger log.Logger) (*serverDiscovery, error) { +func newServerDiscovery(conf *SDConfig, _ log.Logger) (*serverDiscovery, error) { d := &serverDiscovery{ port: conf.Port, datacenterID: conf.DatacenterID, diff --git a/discovery/kubernetes/client_metrics.go b/discovery/kubernetes/client_metrics.go index 3a33e3e8d5..b316f7d885 100644 --- a/discovery/kubernetes/client_metrics.go +++ b/discovery/kubernetes/client_metrics.go @@ -122,11 +122,11 @@ func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer ) } -func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code, method, host string) { +func (clientGoRequestMetricAdapter) Increment(_ context.Context, code, _, _ string) { clientGoRequestResultMetricVec.WithLabelValues(code).Inc() } -func (clientGoRequestMetricAdapter) Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) { +func (clientGoRequestMetricAdapter) Observe(_ context.Context, _ string, u url.URL, latency time.Duration) { clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds()) } @@ -169,7 +169,7 @@ func (f *clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetr return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name) } -func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { +func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(string) workqueue.CounterMetric { // Retries are not used so the metric is omitted. return noopMetric{} } diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index 1f39c23e71..3078cefde1 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// nolint:revive // Many legitimately empty blocks in this file. 
package kubernetes import ( @@ -72,7 +73,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca queue: workqueue.NewNamed("endpoints"), } - e.endpointsInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := e.endpointsInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o interface{}) { epAddCount.Inc() e.enqueue(o) @@ -86,6 +87,9 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca e.enqueue(o) }, }) + if err != nil { + level.Error(l).Log("msg", "Error adding endpoints event handler.", "err", err) + } serviceUpdate := func(o interface{}) { svc, err := convertToService(o) @@ -106,7 +110,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca level.Error(e.logger).Log("msg", "retrieving endpoints failed", "err", err) } } - e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ // TODO(fabxc): potentially remove add and delete event handlers. Those should // be triggered via the endpoint handlers already. AddFunc: func(o interface{}) { @@ -122,8 +126,11 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca serviceUpdate(o) }, }) + if err != nil { + level.Error(l).Log("msg", "Error adding services event handler.", "err", err) + } if e.withNodeMetadata { - e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o interface{}) { node := o.(*apiv1.Node) e.enqueueNode(node.Name) @@ -137,6 +144,9 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca e.enqueueNode(node.Name) }, }) + if err != nil { + level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err) + } } return e @@ -295,7 +305,11 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { } if e.withNodeMetadata { - target = addNodeLabels(target, e.nodeInf, e.logger, addr.NodeName) + if addr.NodeName != nil { + target = addNodeLabels(target, e.nodeInf, e.logger, addr.NodeName) + } else if addr.TargetRef != nil && addr.TargetRef.Kind == "Node" { + target = addNodeLabels(target, e.nodeInf, e.logger, &addr.TargetRef.Name) + } } pod := e.resolvePodRef(addr.TargetRef) @@ -375,18 +389,21 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { continue } - a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) - ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) + // PodIP can be empty when a pod is starting or has been evicted. 
+ if len(pe.pod.Status.PodIP) != 0 { + a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) + ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) - target := model.LabelSet{ - model.AddressLabel: lv(a), - podContainerNameLabel: lv(c.Name), - podContainerImageLabel: lv(c.Image), - podContainerPortNameLabel: lv(cport.Name), - podContainerPortNumberLabel: lv(ports), - podContainerPortProtocolLabel: lv(string(cport.Protocol)), + target := model.LabelSet{ + model.AddressLabel: lv(a), + podContainerNameLabel: lv(c.Name), + podContainerImageLabel: lv(c.Image), + podContainerPortNameLabel: lv(cport.Name), + podContainerPortNumberLabel: lv(ports), + podContainerPortProtocolLabel: lv(string(cport.Protocol)), + } + tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } - tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } } } @@ -456,5 +473,6 @@ func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger log.L nodeLabelset[model.LabelName(nodeLabelPrefix+ln)] = lv(v) nodeLabelset[model.LabelName(nodeLabelPresentPrefix+ln)] = presentValue } + return tg.Merge(nodeLabelset) } diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go index 91b1b0c676..256ffce1b8 100644 --- a/discovery/kubernetes/endpoints_test.go +++ b/discovery/kubernetes/endpoints_test.go @@ -69,6 +69,24 @@ func makeEndpoints() *v1.Endpoints { }, }, }, + { + Addresses: []v1.EndpointAddress{ + { + IP: "6.7.8.9", + TargetRef: &v1.ObjectReference{ + Kind: "Node", + Name: "barbaz", + }, + }, + }, + Ports: []v1.EndpointPort{ + { + Name: "testport", + Port: 9002, + Protocol: v1.ProtocolTCP, + }, + }, + }, }, } } @@ -106,6 +124,14 @@ func TestEndpointsDiscoveryBeforeRun(t *testing.T) { "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "false", }, + { + "__address__": "6.7.8.9:9002", + "__meta_kubernetes_endpoint_address_target_kind": "Node", + "__meta_kubernetes_endpoint_address_target_name": "barbaz", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "default", @@ -398,6 +424,14 @@ func TestEndpointsDiscoveryWithService(t *testing.T) { "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "false", }, + { + "__address__": "6.7.8.9:9002", + "__meta_kubernetes_endpoint_address_target_kind": "Node", + "__meta_kubernetes_endpoint_address_target_name": "barbaz", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "default", @@ -466,6 +500,14 @@ func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) { "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "false", }, + { + "__address__": "6.7.8.9:9002", + "__meta_kubernetes_endpoint_address_target_kind": "Node", + "__meta_kubernetes_endpoint_address_target_name": "barbaz", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "default", @@ -484,8 +526,10 @@ func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) { func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) { 
metadataConfig := AttachMetadataConfig{Node: true} - nodeLabels := map[string]string{"az": "us-east1"} - node := makeNode("foobar", "", "", nodeLabels, nil) + nodeLabels1 := map[string]string{"az": "us-east1"} + nodeLabels2 := map[string]string{"az": "us-west2"} + node1 := makeNode("foobar", "", "", nodeLabels1, nil) + node2 := makeNode("barbaz", "", "", nodeLabels2, nil) svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", @@ -495,7 +539,7 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) { }, }, } - n, _ := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), svc, node) + n, _ := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), svc, node1, node2) k8sDiscoveryTest{ discovery: n, @@ -526,6 +570,17 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) { "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "false", }, + { + "__address__": "6.7.8.9:9002", + "__meta_kubernetes_endpoint_address_target_kind": "Node", + "__meta_kubernetes_endpoint_address_target_name": "barbaz", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + "__meta_kubernetes_node_label_az": "us-west2", + "__meta_kubernetes_node_labelpresent_az": "true", + "__meta_kubernetes_node_name": "barbaz", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "default", @@ -541,8 +596,10 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) { } func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) { - nodeLabels := map[string]string{"az": "us-east1"} - nodes := makeNode("foobar", "", "", nodeLabels, nil) + nodeLabels1 := map[string]string{"az": "us-east1"} + nodeLabels2 := map[string]string{"az": "us-west2"} + node1 := makeNode("foobar", "", "", nodeLabels1, nil) + node2 := makeNode("barbaz", "", "", nodeLabels2, nil) metadataConfig := AttachMetadataConfig{Node: true} svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -553,13 +610,13 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) { }, }, } - n, c := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), nodes, svc) + n, c := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), node1, node2, svc) k8sDiscoveryTest{ discovery: n, afterStart: func() { - nodes.Labels["az"] = "eu-central1" - c.CoreV1().Nodes().Update(context.Background(), nodes, metav1.UpdateOptions{}) + node1.Labels["az"] = "eu-central1" + c.CoreV1().Nodes().Update(context.Background(), node1, metav1.UpdateOptions{}) }, expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ @@ -572,7 +629,7 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) { "__meta_kubernetes_endpoint_port_name": "testport", "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "true", - "__meta_kubernetes_node_label_az": "eu-central1", + "__meta_kubernetes_node_label_az": "us-east1", "__meta_kubernetes_node_labelpresent_az": "true", "__meta_kubernetes_node_name": "foobar", }, @@ -588,6 +645,17 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) { "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "false", }, + { + "__address__": "6.7.8.9:9002", + "__meta_kubernetes_endpoint_address_target_kind": "Node", + 
"__meta_kubernetes_endpoint_address_target_name": "barbaz", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + "__meta_kubernetes_node_label_az": "us-west2", + "__meta_kubernetes_node_labelpresent_az": "true", + "__meta_kubernetes_node_name": "barbaz", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "default", @@ -699,6 +767,14 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) { "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "false", }, + { + "__address__": "6.7.8.9:9002", + "__meta_kubernetes_endpoint_address_target_kind": "Node", + "__meta_kubernetes_endpoint_address_target_name": "barbaz", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "ns1", @@ -815,6 +891,14 @@ func TestEndpointsDiscoveryOwnNamespace(t *testing.T) { "__meta_kubernetes_endpoint_port_protocol": "TCP", "__meta_kubernetes_endpoint_ready": "false", }, + { + "__address__": "6.7.8.9:9002", + "__meta_kubernetes_endpoint_address_target_kind": "Node", + "__meta_kubernetes_endpoint_address_target_name": "barbaz", + "__meta_kubernetes_endpoint_port_name": "testport", + "__meta_kubernetes_endpoint_port_protocol": "TCP", + "__meta_kubernetes_endpoint_ready": "true", + }, }, Labels: model.LabelSet{ "__meta_kubernetes_namespace": "own-ns", @@ -825,3 +909,46 @@ func TestEndpointsDiscoveryOwnNamespace(t *testing.T) { }, }.Run(t) } + +func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) { + ep := makeEndpoints() + ep.Namespace = "ns" + + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "ns", + UID: types.UID("deadbeef"), + }, + Spec: v1.PodSpec{ + NodeName: "testnode", + Containers: []v1.Container{ + { + Name: "p1", + Image: "p1:latest", + Ports: []v1.ContainerPort{ + { + Name: "mainport", + ContainerPort: 9000, + Protocol: v1.ProtocolTCP, + }, + }, + }, + }, + }, + Status: v1.PodStatus{}, + } + + objs := []runtime.Object{ + ep, + pod, + } + + n, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{IncludeOwnNamespace: true}, objs...) 
+ + k8sDiscoveryTest{ + discovery: n, + expectedMaxItems: 0, + expectedRes: map[string]*targetgroup.Group{}, + }.Run(t) +} diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index 594759f454..1eb3fc1807 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -73,7 +73,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod queue: workqueue.NewNamed("endpointSlice"), } - e.endpointSliceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := e.endpointSliceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o interface{}) { epslAddCount.Inc() e.enqueue(o) @@ -87,6 +87,9 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod e.enqueue(o) }, }) + if err != nil { + level.Error(l).Log("msg", "Error adding endpoint slices event handler.", "err", err) + } serviceUpdate := func(o interface{}) { svc, err := convertToService(o) @@ -109,7 +112,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod } } } - e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o interface{}) { svcAddCount.Inc() serviceUpdate(o) @@ -123,9 +126,12 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod serviceUpdate(o) }, }) + if err != nil { + level.Error(l).Log("msg", "Error adding services event handler.", "err", err) + } if e.withNodeMetadata { - e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o interface{}) { node := o.(*apiv1.Node) e.enqueueNode(node.Name) @@ -139,6 +145,9 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod e.enqueueNode(node.Name) }, }) + if err != nil { + level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err) + } } return e @@ -181,7 +190,7 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group) } go func() { - for e.process(ctx, ch) { + for e.process(ctx, ch) { // nolint:revive } }() @@ -250,6 +259,8 @@ const ( endpointSlicePortLabel = metaLabelPrefix + "endpointslice_port" endpointSlicePortAppProtocol = metaLabelPrefix + "endpointslice_port_app_protocol" endpointSliceEndpointConditionsReadyLabel = metaLabelPrefix + "endpointslice_endpoint_conditions_ready" + endpointSliceEndpointConditionsServingLabel = metaLabelPrefix + "endpointslice_endpoint_conditions_serving" + endpointSliceEndpointConditionsTerminatingLabel = metaLabelPrefix + "endpointslice_endpoint_conditions_terminating" endpointSliceEndpointHostnameLabel = metaLabelPrefix + "endpointslice_endpoint_hostname" endpointSliceAddressTargetKindLabel = metaLabelPrefix + "endpointslice_address_target_kind" endpointSliceAddressTargetNameLabel = metaLabelPrefix + "endpointslice_address_target_name" @@ -289,7 +300,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou } if port.protocol() != nil { - target[endpointSlicePortProtocolLabel] = lv(string(*port.protocol())) + target[endpointSlicePortProtocolLabel] = lv(*port.protocol()) } if port.port() != nil { @@ -304,6 +315,14 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou target[endpointSliceEndpointConditionsReadyLabel] = lv(strconv.FormatBool(*ep.conditions().ready())) } + if ep.conditions().serving() != nil { + 
target[endpointSliceEndpointConditionsServingLabel] = lv(strconv.FormatBool(*ep.conditions().serving())) + } + + if ep.conditions().terminating() != nil { + target[endpointSliceEndpointConditionsTerminatingLabel] = lv(strconv.FormatBool(*ep.conditions().terminating())) + } + if ep.hostname() != nil { target[endpointSliceEndpointHostnameLabel] = lv(*ep.hostname()) } @@ -320,7 +339,11 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou } if e.withNodeMetadata { - target = addNodeLabels(target, e.nodeInf, e.logger, ep.nodename()) + if ep.targetRef() != nil && ep.targetRef().Kind == "Node" { + target = addNodeLabels(target, e.nodeInf, e.logger, &ep.targetRef().Name) + } else { + target = addNodeLabels(target, e.nodeInf, e.logger, ep.nodename()) + } } pod := e.resolvePodRef(ep.targetRef()) @@ -393,18 +416,21 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou continue } - a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) - ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) + // PodIP can be empty when a pod is starting or has been evicted. + if len(pe.pod.Status.PodIP) != 0 { + a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) + ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) - target := model.LabelSet{ - model.AddressLabel: lv(a), - podContainerNameLabel: lv(c.Name), - podContainerImageLabel: lv(c.Image), - podContainerPortNameLabel: lv(cport.Name), - podContainerPortNumberLabel: lv(ports), - podContainerPortProtocolLabel: lv(string(cport.Protocol)), + target := model.LabelSet{ + model.AddressLabel: lv(a), + podContainerNameLabel: lv(c.Name), + podContainerImageLabel: lv(c.Image), + podContainerPortNameLabel: lv(cport.Name), + podContainerPortNumberLabel: lv(ports), + podContainerPortProtocolLabel: lv(string(cport.Protocol)), + } + tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } - tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } } } diff --git a/discovery/kubernetes/endpointslice_adaptor.go b/discovery/kubernetes/endpointslice_adaptor.go index 87484b06fd..5a21f1b899 100644 --- a/discovery/kubernetes/endpointslice_adaptor.go +++ b/discovery/kubernetes/endpointslice_adaptor.go @@ -49,6 +49,8 @@ type endpointSliceEndpointAdaptor interface { type endpointSliceEndpointConditionsAdaptor interface { ready() *bool + serving() *bool + terminating() *bool } // Adaptor for k8s.io/api/discovery/v1 @@ -193,6 +195,14 @@ func (e *endpointSliceEndpointConditionsAdaptorV1) ready() *bool { return e.endpointConditions.Ready } +func (e *endpointSliceEndpointConditionsAdaptorV1) serving() *bool { + return e.endpointConditions.Serving +} + +func (e *endpointSliceEndpointConditionsAdaptorV1) terminating() *bool { + return e.endpointConditions.Terminating +} + type endpointSliceEndpointAdaptorV1beta1 struct { endpoint v1beta1.Endpoint } @@ -237,6 +247,14 @@ func (e *endpointSliceEndpointConditionsAdaptorV1beta1) ready() *bool { return e.endpointConditions.Ready } +func (e *endpointSliceEndpointConditionsAdaptorV1beta1) serving() *bool { + return e.endpointConditions.Serving +} + +func (e *endpointSliceEndpointConditionsAdaptorV1beta1) terminating() *bool { + return e.endpointConditions.Terminating +} + type endpointSlicePortAdaptorV1 struct { endpointPort v1.EndpointPort } diff --git a/discovery/kubernetes/endpointslice_adaptor_test.go b/discovery/kubernetes/endpointslice_adaptor_test.go index 
9a2a003123..e564910936 100644 --- a/discovery/kubernetes/endpointslice_adaptor_test.go +++ b/discovery/kubernetes/endpointslice_adaptor_test.go @@ -35,6 +35,8 @@ func Test_EndpointSliceAdaptor_v1(t *testing.T) { require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses()) require.Equal(t, endpointSlice.Endpoints[i].Hostname, endpointAdaptor.hostname()) require.Equal(t, endpointSlice.Endpoints[i].Conditions.Ready, endpointAdaptor.conditions().ready()) + require.Equal(t, endpointSlice.Endpoints[i].Conditions.Serving, endpointAdaptor.conditions().serving()) + require.Equal(t, endpointSlice.Endpoints[i].Conditions.Terminating, endpointAdaptor.conditions().terminating()) require.Equal(t, endpointSlice.Endpoints[i].TargetRef, endpointAdaptor.targetRef()) require.Equal(t, endpointSlice.Endpoints[i].DeprecatedTopology, endpointAdaptor.topology()) } @@ -61,6 +63,8 @@ func Test_EndpointSliceAdaptor_v1beta1(t *testing.T) { require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses()) require.Equal(t, endpointSlice.Endpoints[i].Hostname, endpointAdaptor.hostname()) require.Equal(t, endpointSlice.Endpoints[i].Conditions.Ready, endpointAdaptor.conditions().ready()) + require.Equal(t, endpointSlice.Endpoints[i].Conditions.Serving, endpointAdaptor.conditions().serving()) + require.Equal(t, endpointSlice.Endpoints[i].Conditions.Terminating, endpointAdaptor.conditions().terminating()) require.Equal(t, endpointSlice.Endpoints[i].TargetRef, endpointAdaptor.targetRef()) require.Equal(t, endpointSlice.Endpoints[i].Topology, endpointAdaptor.topology()) } diff --git a/discovery/kubernetes/endpointslice_test.go b/discovery/kubernetes/endpointslice_test.go index a0ae543fc9..0c31147a93 100644 --- a/discovery/kubernetes/endpointslice_test.go +++ b/discovery/kubernetes/endpointslice_test.go @@ -64,23 +64,42 @@ func makeEndpointSliceV1() *v1.EndpointSlice { }, Endpoints: []v1.Endpoint{ { - Addresses: []string{"1.2.3.4"}, - Conditions: v1.EndpointConditions{Ready: boolptr(true)}, - Hostname: strptr("testendpoint1"), - TargetRef: &corev1.ObjectReference{}, - NodeName: strptr("foobar"), + Addresses: []string{"1.2.3.4"}, + Conditions: v1.EndpointConditions{ + Ready: boolptr(true), + Serving: boolptr(true), + Terminating: boolptr(false), + }, + Hostname: strptr("testendpoint1"), + TargetRef: &corev1.ObjectReference{}, + NodeName: strptr("foobar"), DeprecatedTopology: map[string]string{ "topology": "value", }, }, { Addresses: []string{"2.3.4.5"}, Conditions: v1.EndpointConditions{ - Ready: boolptr(true), + Ready: boolptr(true), + Serving: boolptr(true), + Terminating: boolptr(false), }, }, { Addresses: []string{"3.4.5.6"}, Conditions: v1.EndpointConditions{ - Ready: boolptr(false), + Ready: boolptr(false), + Serving: boolptr(true), + Terminating: boolptr(true), + }, + }, { + Addresses: []string{"4.5.6.7"}, + Conditions: v1.EndpointConditions{ + Ready: boolptr(true), + Serving: boolptr(true), + Terminating: boolptr(false), + }, + TargetRef: &corev1.ObjectReference{ + Kind: "Node", + Name: "barbaz", }, }, }, @@ -111,12 +130,27 @@ func makeEndpointSliceV1beta1() *v1beta1.EndpointSlice { }, { Addresses: []string{"2.3.4.5"}, Conditions: v1beta1.EndpointConditions{ - Ready: boolptr(true), + Ready: boolptr(true), + Serving: boolptr(true), + Terminating: boolptr(false), }, }, { Addresses: []string{"3.4.5.6"}, Conditions: v1beta1.EndpointConditions{ - Ready: boolptr(false), + Ready: boolptr(false), + Serving: boolptr(true), + Terminating: boolptr(true), + }, + }, { + Addresses: 
[]string{"4.5.6.7"}, + Conditions: v1beta1.EndpointConditions{ + Ready: boolptr(true), + Serving: boolptr(true), + Terminating: boolptr(false), + }, + TargetRef: &corev1.ObjectReference{ + Kind: "Node", + Name: "barbaz", }, }, }, @@ -141,6 +175,8 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", @@ -151,19 +187,35 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) { }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + "__meta_kubernetes_endpointslice_address_target_name": "barbaz", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, }, Labels: model.LabelSet{ @@ -199,17 +251,32 @@ func TestEndpointSliceDiscoveryBeforeRunV1beta1(t *testing.T) { }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + 
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + "__meta_kubernetes_endpointslice_address_target_name": "barbaz", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, }, Labels: model.LabelSet{ @@ -367,6 +434,8 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) { "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", @@ -377,19 +446,35 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) { }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + 
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + "__meta_kubernetes_endpointslice_address_target_name": "barbaz", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, }, Labels: map[model.LabelName]model.LabelValue{ @@ -445,6 +530,8 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) { "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", @@ -455,19 +542,35 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) { }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + "__meta_kubernetes_endpointslice_address_target_name": "barbaz", + 
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, }, Labels: model.LabelSet{ @@ -512,6 +615,8 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) { "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", @@ -522,19 +627,35 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) { }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + "__meta_kubernetes_endpointslice_address_target_name": "barbaz", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, }, Labels: model.LabelSet{ @@ -574,6 +695,8 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) { 
"__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", @@ -584,19 +707,35 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) { }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + "__meta_kubernetes_endpointslice_address_target_name": "barbaz", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, }, Labels: model.LabelSet{ @@ -652,6 +791,8 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) { "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", @@ -662,19 +803,35 @@ func 
TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) { }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", }, { "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + "__meta_kubernetes_endpointslice_address_target_name": "barbaz", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, }, Labels: model.LabelSet{ @@ -695,7 +852,8 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) { func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { metadataConfig := AttachMetadataConfig{Node: true} - nodeLabels := map[string]string{"az": "us-east1"} + nodeLabels1 := map[string]string{"az": "us-east1"} + nodeLabels2 := map[string]string{"az": "us-west2"} svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "testendpoints", @@ -705,7 +863,7 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { }, }, } - objs := []runtime.Object{makeEndpointSliceV1(), makeNode("foobar", "", "", nodeLabels, nil), svc} + objs := []runtime.Object{makeEndpointSliceV1(), makeNode("foobar", "", "", nodeLabels1, nil), makeNode("barbaz", "", "", nodeLabels2, nil), svc} n, _ := makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, metadataConfig, objs...) 
k8sDiscoveryTest{ @@ -719,6 +877,8 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", @@ -732,19 +892,38 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + "__meta_kubernetes_endpointslice_address_target_name": "barbaz", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_node_label_az": "us-west2", + "__meta_kubernetes_node_labelpresent_az": "true", + "__meta_kubernetes_node_name": "barbaz", }, }, Labels: model.LabelSet{ @@ -763,7 +942,8 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) { func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) { metadataConfig := AttachMetadataConfig{Node: true} - nodeLabels := map[string]string{"az": "us-east1"} + nodeLabels1 := map[string]string{"az": "us-east1"} + nodeLabels2 := map[string]string{"az": "us-west2"} svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ 
Name: "testendpoints", @@ -773,16 +953,17 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) { }, }, } - node := makeNode("foobar", "", "", nodeLabels, nil) - objs := []runtime.Object{makeEndpointSliceV1(), node, svc} + node1 := makeNode("foobar", "", "", nodeLabels1, nil) + node2 := makeNode("barbaz", "", "", nodeLabels2, nil) + objs := []runtime.Object{makeEndpointSliceV1(), node1, node2, svc} n, c := makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, metadataConfig, objs...) k8sDiscoveryTest{ discovery: n, expectedMaxItems: 2, afterStart: func() { - node.Labels["az"] = "us-central1" - c.CoreV1().Nodes().Update(context.Background(), node, metav1.UpdateOptions{}) + node1.Labels["az"] = "us-central1" + c.CoreV1().Nodes().Update(context.Background(), node1, metav1.UpdateOptions{}) }, expectedRes: map[string]*targetgroup.Group{ "endpointslice/default/testendpoints": { @@ -792,6 +973,8 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) { "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", @@ -799,25 +982,44 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) { "__meta_kubernetes_endpointslice_port_app_protocol": "http", "__meta_kubernetes_endpointslice_port_name": "testport", "__meta_kubernetes_endpointslice_port_protocol": "TCP", - "__meta_kubernetes_node_label_az": "us-central1", + "__meta_kubernetes_node_label_az": "us-east1", "__meta_kubernetes_node_labelpresent_az": "true", "__meta_kubernetes_node_name": "foobar", }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, { "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + 
"__meta_kubernetes_endpointslice_port_protocol": "TCP", + }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + "__meta_kubernetes_endpointslice_address_target_name": "barbaz", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_node_label_az": "us-west2", + "__meta_kubernetes_node_labelpresent_az": "true", + "__meta_kubernetes_node_name": "barbaz", }, }, Labels: model.LabelSet{ @@ -913,6 +1115,8 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) { "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", @@ -923,19 +1127,35 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) { }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", }, { "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + "__meta_kubernetes_endpointslice_address_target_name": "barbaz", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + 
"__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, }, Labels: model.LabelSet{ @@ -1039,6 +1259,8 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) { "__meta_kubernetes_endpointslice_address_target_kind": "", "__meta_kubernetes_endpointslice_address_target_name": "", "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", "__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1", "__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true", "__meta_kubernetes_endpointslice_endpoint_topology_topology": "value", @@ -1049,19 +1271,35 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) { }, { "__address__": "2.3.4.5:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", }, { "__address__": "3.4.5.6:9000", - "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", - "__meta_kubernetes_endpointslice_port": "9000", - "__meta_kubernetes_endpointslice_port_name": "testport", - "__meta_kubernetes_endpointslice_port_protocol": "TCP", - "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + }, + { + "__address__": "4.5.6.7:9000", + "__meta_kubernetes_endpointslice_address_target_kind": "Node", + "__meta_kubernetes_endpointslice_address_target_name": "barbaz", + "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true", + "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false", + "__meta_kubernetes_endpointslice_port": "9000", + "__meta_kubernetes_endpointslice_port_app_protocol": "http", + "__meta_kubernetes_endpointslice_port_name": "testport", + "__meta_kubernetes_endpointslice_port_protocol": "TCP", }, }, Labels: model.LabelSet{ @@ -1074,3 +1312,46 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) { }, }.Run(t) } + +func TestEndpointSliceDiscoveryEmptyPodStatus(t *testing.T) { + ep := makeEndpointSliceV1() + ep.Namespace = "ns" + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + Namespace: "ns", + UID: types.UID("deadbeef"), 
+ }, + Spec: corev1.PodSpec{ + NodeName: "testnode", + Containers: []corev1.Container{ + { + Name: "p1", + Image: "p1:latest", + Ports: []corev1.ContainerPort{ + { + Name: "mainport", + ContainerPort: 9000, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + Status: corev1.PodStatus{}, + } + + objs := []runtime.Object{ + ep, + pod, + } + + n, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{IncludeOwnNamespace: true}, objs...) + + k8sDiscoveryTest{ + discovery: n, + expectedMaxItems: 0, + expectedRes: map[string]*targetgroup.Group{}, + }.Run(t) +} diff --git a/discovery/kubernetes/ingress.go b/discovery/kubernetes/ingress.go index de6d2a0b4d..ad47c341a5 100644 --- a/discovery/kubernetes/ingress.go +++ b/discovery/kubernetes/ingress.go @@ -48,7 +48,7 @@ type Ingress struct { // NewIngress returns a new ingress discovery. func NewIngress(l log.Logger, inf cache.SharedInformer) *Ingress { s := &Ingress{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed("ingress")} - s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o interface{}) { ingressAddCount.Inc() s.enqueue(o) @@ -62,6 +62,9 @@ func NewIngress(l log.Logger, inf cache.SharedInformer) *Ingress { s.enqueue(o) }, }) + if err != nil { + level.Error(l).Log("msg", "Error adding ingresses event handler.", "err", err) + } return s } @@ -86,7 +89,7 @@ func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for i.process(ctx, ch) { + for i.process(ctx, ch) { // nolint:revive } }() diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go index 0ffedc51ed..e87a1c9b24 100644 --- a/discovery/kubernetes/kubernetes.go +++ b/discovery/kubernetes/kubernetes.go @@ -299,12 +299,13 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) { err error ownNamespace string ) - if conf.KubeConfig != "" { + switch { + case conf.KubeConfig != "": kcfg, err = clientcmd.BuildConfigFromFlags("", conf.KubeConfig) if err != nil { return nil, err } - } else if conf.APIServer.URL == nil { + case conf.APIServer.URL == nil: // Use the Kubernetes provided pod service account // as described in https://kubernetes.io/docs/admin/service-accounts-admin/ kcfg, err = rest.InClusterConfig() @@ -324,7 +325,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) { } level.Info(l).Log("msg", "Using pod service account via in-cluster config") - } else { + default: rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd") if err != nil { return nil, err @@ -382,7 +383,8 @@ func mapSelector(rawSelector []SelectorConfig) roleSelector { return rs } -const resyncPeriod = 10 * time.Minute +// Disable the informer's resync, which just periodically resends already processed updates and distorts SD metrics. +const resyncDisabled = 0 // Run implements the discoverer interface.
func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { @@ -475,8 +477,8 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { eps := NewEndpointSlice( log.With(d.logger, "role", "endpointslice"), informer, - cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod), - cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncPeriod), + cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), + cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled), nodeInf, ) d.discoverers = append(d.discoverers, eps) @@ -534,8 +536,8 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { eps := NewEndpoints( log.With(d.logger, "role", "endpoint"), d.newEndpointsByNodeInformer(elw), - cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod), - cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncPeriod), + cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), + cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled), nodeInf, ) d.discoverers = append(d.discoverers, eps) @@ -589,7 +591,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } svc := NewService( log.With(d.logger, "role", "service"), - cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod), + cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), ) d.discoverers = append(d.discoverers, svc) go svc.informer.Run(ctx.Done()) @@ -627,7 +629,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { return i.Watch(ctx, options) }, } - informer = cache.NewSharedInformer(ilw, &networkv1.Ingress{}, resyncPeriod) + informer = cache.NewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled) } else { i := d.client.NetworkingV1beta1().Ingresses(namespace) ilw := &cache.ListWatch{ @@ -642,7 +644,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { return i.Watch(ctx, options) }, } - informer = cache.NewSharedInformer(ilw, &v1beta1.Ingress{}, resyncPeriod) + informer = cache.NewSharedInformer(ilw, &v1beta1.Ingress{}, resyncDisabled) } ingress := NewIngress( log.With(d.logger, "role", "ingress"), @@ -732,7 +734,7 @@ func (d *Discovery) newNodeInformer(ctx context.Context) cache.SharedInformer { return d.client.CoreV1().Nodes().Watch(ctx, options) }, } - return cache.NewSharedInformer(nlw, &apiv1.Node{}, resyncPeriod) + return cache.NewSharedInformer(nlw, &apiv1.Node{}, resyncDisabled) } func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedIndexInformer { @@ -747,39 +749,45 @@ func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedInde } } - return cache.NewSharedIndexInformer(plw, &apiv1.Pod{}, resyncPeriod, indexers) + return cache.NewSharedIndexInformer(plw, &apiv1.Pod{}, resyncDisabled, indexers) } func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.SharedIndexInformer { indexers := make(map[string]cache.IndexFunc) if !d.attachMetadata.Node { - return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncPeriod, indexers) + return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers) } indexers[nodeIndex] = func(obj interface{}) ([]string, error) { e, ok := obj.(*apiv1.Endpoints) if !ok { - return nil, fmt.Errorf("object is not a pod") + return nil, fmt.Errorf("object is not endpoints") } var nodes []string for _, target := range e.Subsets { for _, addr := range target.Addresses { - if addr.NodeName == nil { - continue + if addr.TargetRef != nil { + switch 
addr.TargetRef.Kind { + case "Pod": + if addr.NodeName != nil { + nodes = append(nodes, *addr.NodeName) + } + case "Node": + nodes = append(nodes, addr.TargetRef.Name) + } } - nodes = append(nodes, *addr.NodeName) } } return nodes, nil } - return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncPeriod, indexers) + return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers) } func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object runtime.Object) cache.SharedIndexInformer { indexers := make(map[string]cache.IndexFunc) if !d.attachMetadata.Node { - cache.NewSharedIndexInformer(plw, &disv1.EndpointSlice{}, resyncPeriod, indexers) + return cache.NewSharedIndexInformer(plw, &disv1.EndpointSlice{}, resyncDisabled, indexers) } indexers[nodeIndex] = func(obj interface{}) ([]string, error) { @@ -787,17 +795,29 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object switch e := obj.(type) { case *disv1.EndpointSlice: for _, target := range e.Endpoints { - if target.NodeName == nil { - continue + if target.TargetRef != nil { + switch target.TargetRef.Kind { + case "Pod": + if target.NodeName != nil { + nodes = append(nodes, *target.NodeName) + } + case "Node": + nodes = append(nodes, target.TargetRef.Name) + } } - nodes = append(nodes, *target.NodeName) } case *disv1beta1.EndpointSlice: for _, target := range e.Endpoints { - if target.NodeName == nil { - continue + if target.TargetRef != nil { + switch target.TargetRef.Kind { + case "Pod": + if target.NodeName != nil { + nodes = append(nodes, *target.NodeName) + } + case "Node": + nodes = append(nodes, target.TargetRef.Name) + } } - nodes = append(nodes, *target.NodeName) } default: return nil, fmt.Errorf("object is not an endpointslice") @@ -806,7 +826,7 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object return nodes, nil } - return cache.NewSharedIndexInformer(plw, object, resyncPeriod, indexers) + return cache.NewSharedIndexInformer(plw, object, resyncDisabled, indexers) } func checkDiscoveryV1Supported(client kubernetes.Interface) (bool, error) { diff --git a/discovery/kubernetes/node.go b/discovery/kubernetes/node.go index ebb6bbced7..d0a6d2780d 100644 --- a/discovery/kubernetes/node.go +++ b/discovery/kubernetes/node.go @@ -55,7 +55,7 @@ func NewNode(l log.Logger, inf cache.SharedInformer) *Node { l = log.NewNopLogger() } n := &Node{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed("node")} - n.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := n.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o interface{}) { nodeAddCount.Inc() n.enqueue(o) @@ -69,6 +69,9 @@ ... }, }) + if err != nil { + level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err) + } return n } @@ -93,7 +96,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for n.process(ctx, ch) { + for n.process(ctx, ch) { // nolint:revive } }() @@ -206,7 +209,7 @@ func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group { return tg } -// nodeAddresses returns the provided node's address, based on the priority: +// nodeAddress returns the provided node's address, based on the priority: // 1. NodeInternalIP // 2. NodeInternalDNS // 3.
NodeExternalIP diff --git a/discovery/kubernetes/pod.go b/discovery/kubernetes/pod.go index e19a33c1ae..732cf52ad9 100644 --- a/discovery/kubernetes/pod.go +++ b/discovery/kubernetes/pod.go @@ -65,7 +65,7 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo logger: l, queue: workqueue.NewNamed("pod"), } - p.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := p.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o interface{}) { podAddCount.Inc() p.enqueue(o) @@ -79,9 +79,12 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo p.enqueue(o) }, }) + if err != nil { + level.Error(l).Log("msg", "Error adding pods event handler.", "err", err) + } if p.withNodeMetadata { - p.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err = p.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o interface{}) { node := o.(*apiv1.Node) p.enqueuePodsForNode(node.Name) @@ -95,6 +98,9 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo p.enqueuePodsForNode(node.Name) }, }) + if err != nil { + level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err) + } } return p @@ -126,7 +132,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for p.process(ctx, ch) { + for p.process(ctx, ch) { // nolint:revive } }() @@ -177,6 +183,7 @@ const ( podNameLabel = metaLabelPrefix + "pod_name" podIPLabel = metaLabelPrefix + "pod_ip" podContainerNameLabel = metaLabelPrefix + "pod_container_name" + podContainerIDLabel = metaLabelPrefix + "pod_container_id" podContainerImageLabel = metaLabelPrefix + "pod_container_image" podContainerPortNameLabel = metaLabelPrefix + "pod_container_port_name" podContainerPortNumberLabel = metaLabelPrefix + "pod_container_port_number" @@ -242,6 +249,24 @@ func podLabels(pod *apiv1.Pod) model.LabelSet { return ls } +func (p *Pod) findPodContainerStatus(statuses *[]apiv1.ContainerStatus, containerName string) (*apiv1.ContainerStatus, error) { + for _, s := range *statuses { + if s.Name == containerName { + return &s, nil + } + } + return nil, fmt.Errorf("cannot find container with name %v", containerName) +} + +func (p *Pod) findPodContainerID(statuses *[]apiv1.ContainerStatus, containerName string) string { + cStatus, err := p.findPodContainerStatus(statuses, containerName) + if err != nil { + level.Debug(p.logger).Log("msg", "cannot find container ID", "err", err) + return "" + } + return cStatus.ContainerID +} + func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group { tg := &targetgroup.Group{ Source: podSource(pod), @@ -261,6 +286,12 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group { for i, c := range containers { isInit := i >= len(pod.Spec.Containers) + cStatuses := &pod.Status.ContainerStatuses + if isInit { + cStatuses = &pod.Status.InitContainerStatuses + } + cID := p.findPodContainerID(cStatuses, c.Name) + // If no ports are defined for the container, create an anonymous // target per container.
if len(c.Ports) == 0 { @@ -269,6 +300,7 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group { tg.Targets = append(tg.Targets, model.LabelSet{ model.AddressLabel: lv(pod.Status.PodIP), podContainerNameLabel: lv(c.Name), + podContainerIDLabel: lv(cID), podContainerImageLabel: lv(c.Image), podContainerIsInit: lv(strconv.FormatBool(isInit)), }) @@ -282,6 +314,7 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group { tg.Targets = append(tg.Targets, model.LabelSet{ model.AddressLabel: lv(addr), podContainerNameLabel: lv(c.Name), + podContainerIDLabel: lv(cID), podContainerImageLabel: lv(c.Image), podContainerPortNumberLabel: lv(ports), podContainerPortNameLabel: lv(port.Name), diff --git a/discovery/kubernetes/pod_test.go b/discovery/kubernetes/pod_test.go index f2b79dbb84..286a1a230d 100644 --- a/discovery/kubernetes/pod_test.go +++ b/discovery/kubernetes/pod_test.go @@ -81,6 +81,16 @@ func makeMultiPortPods() *v1.Pod { Status: v1.ConditionTrue, }, }, + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "testcontainer0", + ContainerID: "docker://a1b2c3d4e5f6", + }, + { + Name: "testcontainer1", + ContainerID: "containerd://6f5e4d3c2b1a", + }, + }, }, } } @@ -118,6 +128,12 @@ func makePods() *v1.Pod { Status: v1.ConditionTrue, }, }, + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "testcontainer", + ContainerID: "docker://a1b2c3d4e5f6", + }, + }, }, } } @@ -162,6 +178,18 @@ func makeInitContainerPods() *v1.Pod { Status: v1.ConditionFalse, }, }, + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "testcontainer", + ContainerID: "docker://a1b2c3d4e5f6", + }, + }, + InitContainerStatuses: []v1.ContainerStatus{ + { + Name: "initcontainer", + ContainerID: "containerd://6f5e4d3c2b1a", + }, + }, }, } } @@ -179,6 +207,7 @@ func expectedPodTargetGroups(ns string) map[string]*targetgroup.Group { "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_container_init": "false", + "__meta_kubernetes_pod_container_id": "docker://a1b2c3d4e5f6", }, }, Labels: model.LabelSet{ @@ -230,6 +259,7 @@ func TestPodDiscoveryBeforeRun(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9000", "__meta_kubernetes_pod_container_port_protocol": "TCP", "__meta_kubernetes_pod_container_init": "false", + "__meta_kubernetes_pod_container_id": "docker://a1b2c3d4e5f6", }, { "__address__": "1.2.3.4:9001", @@ -239,12 +269,14 @@ func TestPodDiscoveryBeforeRun(t *testing.T) { "__meta_kubernetes_pod_container_port_number": "9001", "__meta_kubernetes_pod_container_port_protocol": "UDP", "__meta_kubernetes_pod_container_init": "false", + "__meta_kubernetes_pod_container_id": "docker://a1b2c3d4e5f6", }, { "__address__": "1.2.3.4", "__meta_kubernetes_pod_container_name": "testcontainer1", "__meta_kubernetes_pod_container_image": "testcontainer1:latest", "__meta_kubernetes_pod_container_init": "false", + "__meta_kubernetes_pod_container_id": "containerd://6f5e4d3c2b1a", }, }, Labels: model.LabelSet{ @@ -280,6 +312,7 @@ func TestPodDiscoveryInitContainer(t *testing.T) { "__meta_kubernetes_pod_container_name": "initcontainer", "__meta_kubernetes_pod_container_image": "initcontainer:latest", "__meta_kubernetes_pod_container_init": "true", + "__meta_kubernetes_pod_container_id": "containerd://6f5e4d3c2b1a", }) expected[key].Labels["__meta_kubernetes_pod_phase"] = "Pending" expected[key].Labels["__meta_kubernetes_pod_ready"] = "false" diff --git a/discovery/kubernetes/service.go b/discovery/kubernetes/service.go index 
44ebcbc190..40e17679ee 100644 --- a/discovery/kubernetes/service.go +++ b/discovery/kubernetes/service.go @@ -51,7 +51,7 @@ func NewService(l log.Logger, inf cache.SharedInformer) *Service { l = log.NewNopLogger() } s := &Service{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed("service")} - s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(o interface{}) { svcAddCount.Inc() s.enqueue(o) @@ -65,6 +65,9 @@ func NewService(l log.Logger, inf cache.SharedInformer) *Service { s.enqueue(o) }, }) + if err != nil { + level.Error(l).Log("msg", "Error adding services event handler.", "err", err) + } return s } @@ -89,7 +92,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } go func() { - for s.process(ctx, ch) { + for s.process(ctx, ch) { // nolint:revive } }() diff --git a/discovery/legacymanager/manager.go b/discovery/legacymanager/manager.go index 7a3d6b3b82..87823f4010 100644 --- a/discovery/legacymanager/manager.go +++ b/discovery/legacymanager/manager.go @@ -311,10 +311,10 @@ func (m *Manager) registerProviders(cfgs discovery.Configs, setName string) int } typ := cfg.Name() d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{ - Logger: log.With(m.logger, "discovery", typ), + Logger: log.With(m.logger, "discovery", typ, "config", setName), }) if err != nil { - level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ) + level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName) failed++ return } diff --git a/discovery/legacymanager/manager_test.go b/discovery/legacymanager/manager_test.go index 57c82b72a8..13b84e6e36 100644 --- a/discovery/legacymanager/manager_test.go +++ b/discovery/legacymanager/manager_test.go @@ -686,12 +686,7 @@ func TestTargetUpdatesOrder(t *testing.T) { case tgs := <-provUpdates: discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs) for _, got := range discoveryManager.allGroups() { - assertEqualGroups(t, got, tc.expectedTargets[x], func(got, expected string) string { - return fmt.Sprintf("%d: \ntargets mismatch \ngot: %v \nexpected: %v", - x, - got, - expected) - }) + assertEqualGroups(t, got, tc.expectedTargets[x]) } } } @@ -699,7 +694,7 @@ func TestTargetUpdatesOrder(t *testing.T) { } } -func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group, msg func(got, expected string) string) { +func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group) { t.Helper() // Need to sort by the groups's source as the received order is not guaranteed. 
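The helper's body is elided by the hunk; a minimal sketch of what the slimmed-down comparison presumably does, assuming the sort key is each group's Source field as the comment above states (illustrative, not code from the patch):

sort.Slice(got, func(i, j int) bool { return got[i].Source < got[j].Source })
sort.Slice(expected, func(i, j int) bool { return expected[i].Source < expected[j].Source })
require.Equal(t, expected, got)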
@@ -1079,9 +1074,7 @@ func TestCoordinationWithReceiver(t *testing.T) { if _, ok := tgs[k]; !ok { t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs) } - assertEqualGroups(t, tgs[k], expected.tgs[k], func(got, expected string) string { - return fmt.Sprintf("step %d: targets mismatch \ngot: %q \nexpected: %q", i, got, expected) - }) + assertEqualGroups(t, tgs[k], expected.tgs[k]) } } } diff --git a/discovery/legacymanager/registry.go b/discovery/legacymanager/registry.go index 687f093829..955705394d 100644 --- a/discovery/legacymanager/registry.go +++ b/discovery/legacymanager/registry.go @@ -254,7 +254,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error { oldStr := oldTyp.String() newStr := newTyp.String() for i, s := range e.Errors { - e.Errors[i] = strings.Replace(s, oldStr, newStr, -1) + e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr) } } return err diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go index 0fd0a2c370..12b9575143 100644 --- a/discovery/linode/linode.go +++ b/discovery/linode/linode.go @@ -249,20 +249,20 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro if detailedIP.Address != ip.String() { continue } - - if detailedIP.Public && publicIPv4 == "" { + switch { + case detailedIP.Public && publicIPv4 == "": publicIPv4 = detailedIP.Address if detailedIP.RDNS != "" && detailedIP.RDNS != "null" { publicIPv4RDNS = detailedIP.RDNS } - } else if !detailedIP.Public && privateIPv4 == "" { + case !detailedIP.Public && privateIPv4 == "": privateIPv4 = detailedIP.Address if detailedIP.RDNS != "" && detailedIP.RDNS != "null" { privateIPv4RDNS = detailedIP.RDNS } - } else { + default: extraIPs = append(extraIPs, detailedIP.Address) } } diff --git a/discovery/manager.go b/discovery/manager.go index b7357fa6cd..8b304a0faf 100644 --- a/discovery/manager.go +++ b/discovery/manager.go @@ -428,11 +428,11 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int { } typ := cfg.Name() d, err := cfg.NewDiscoverer(DiscovererOptions{ - Logger: log.With(m.logger, "discovery", typ), + Logger: log.With(m.logger, "discovery", typ, "config", setName), HTTPClientOptions: m.httpOpts, }) if err != nil { - level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ) + level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName) failed++ return } diff --git a/discovery/manager_test.go b/discovery/manager_test.go index 970168b0f5..5371608112 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -686,12 +686,7 @@ func TestTargetUpdatesOrder(t *testing.T) { case tgs := <-provUpdates: discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs) for _, got := range discoveryManager.allGroups() { - assertEqualGroups(t, got, tc.expectedTargets[x], func(got, expected string) string { - return fmt.Sprintf("%d: \ntargets mismatch \ngot: %v \nexpected: %v", - x, - got, - expected) - }) + assertEqualGroups(t, got, tc.expectedTargets[x]) } } } @@ -699,7 +694,7 @@ func TestTargetUpdatesOrder(t *testing.T) { } } -func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group, msg func(got, expected string) string) { +func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group) { t.Helper() // Need to sort by the groups's source as the received order is not guaranteed. 
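The same logger change lands in the non-legacy manager above; a hedged illustration of its effect, where the job name "node-exporter" is hypothetical:

logger := log.With(m.logger, "discovery", "kubernetes", "config", "node-exporter")
// With go-kit's logfmt output a failure now reads roughly:
//   discovery=kubernetes config=node-exporter msg="Cannot create service discovery" err=...
level.Error(logger).Log("msg", "Cannot create service discovery", "err", err)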
@@ -1129,7 +1124,7 @@ type lockStaticConfig struct { } func (s lockStaticConfig) Name() string { return "lockstatic" } -func (s lockStaticConfig) NewDiscoverer(options DiscovererOptions) (Discoverer, error) { +func (s lockStaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) { return (lockStaticDiscoverer)(s), nil } @@ -1330,9 +1325,7 @@ func TestCoordinationWithReceiver(t *testing.T) { if _, ok := tgs[k]; !ok { t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs) } - assertEqualGroups(t, tgs[k], expected.tgs[k], func(got, expected string) string { - return fmt.Sprintf("step %d: targets mismatch \ngot: %q \nexpected: %q", i, got, expected) - }) + assertEqualGroups(t, tgs[k], expected.tgs[k]) } } } @@ -1399,7 +1392,7 @@ func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) { // TestTargetSetTargetGroupsUpdateDuringApplyConfig is used to detect races when // ApplyConfig happens at the same time as targets update. -func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) { +func TestTargetSetTargetGroupsUpdateDuringApplyConfig(*testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() discoveryManager := NewManager(ctx, log.NewNopLogger()) diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go index 079f93ad0b..cfd3e2c083 100644 --- a/discovery/marathon/marathon.go +++ b/discovery/marathon/marathon.go @@ -136,9 +136,10 @@ func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) { return nil, err } - if len(conf.AuthToken) > 0 { + switch { + case len(conf.AuthToken) > 0: rt, err = newAuthTokenRoundTripper(conf.AuthToken, rt) - } else if len(conf.AuthTokenFile) > 0 { + case len(conf.AuthTokenFile) > 0: rt, err = newAuthTokenFileRoundTripper(conf.AuthTokenFile, rt) } if err != nil { @@ -400,19 +401,20 @@ func targetsForApp(app *app) []model.LabelSet { var labels []map[string]string var prefix string - if len(app.Container.PortMappings) != 0 { + switch { + case len(app.Container.PortMappings) != 0: // In Marathon 1.5.x the "container.docker.portMappings" object was moved // to "container.portMappings". ports, labels = extractPortMapping(app.Container.PortMappings, app.isContainerNet()) prefix = portMappingLabelPrefix - } else if len(app.Container.Docker.PortMappings) != 0 { + case len(app.Container.Docker.PortMappings) != 0: // Prior to Marathon 1.5 the port mappings could be found at the path // "container.docker.portMappings". ports, labels = extractPortMapping(app.Container.Docker.PortMappings, app.isContainerNet()) prefix = portMappingLabelPrefix - } else if len(app.PortDefinitions) != 0 { + case len(app.PortDefinitions) != 0: // PortDefinitions deprecates the "ports" array and can be used to specify // a list of ports with metadata in case a mapping is not required. 
ports = make([]uint32, len(app.PortDefinitions)) diff --git a/discovery/moby/testdata/dockerprom/containers/json_limit_0.json b/discovery/moby/testdata/dockerprom/containers/json.json similarity index 100% rename from discovery/moby/testdata/dockerprom/containers/json_limit_0.json rename to discovery/moby/testdata/dockerprom/containers/json.json diff --git a/discovery/nomad/nomad.go b/discovery/nomad/nomad.go index c8d5130396..7013f0737c 100644 --- a/discovery/nomad/nomad.go +++ b/discovery/nomad/nomad.go @@ -161,7 +161,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { return d, nil } -func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { +func (d *Discovery) refresh(context.Context) ([]*targetgroup.Group, error) { opts := &nomad.QueryOptions{ AllowStale: d.allowStale, } diff --git a/discovery/openstack/instance.go b/discovery/openstack/instance.go index 2f7e99a071..b2fe1e7870 100644 --- a/discovery/openstack/instance.go +++ b/discovery/openstack/instance.go @@ -36,6 +36,7 @@ const ( openstackLabelAddressPool = openstackLabelPrefix + "address_pool" openstackLabelInstanceFlavor = openstackLabelPrefix + "instance_flavor" openstackLabelInstanceID = openstackLabelPrefix + "instance_id" + openstackLabelInstanceImage = openstackLabelPrefix + "instance_image" openstackLabelInstanceName = openstackLabelPrefix + "instance_name" openstackLabelInstanceStatus = openstackLabelPrefix + "instance_status" openstackLabelPrivateIP = openstackLabelPrefix + "private_ip" @@ -144,12 +145,18 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, openstackLabelUserID: model.LabelValue(s.UserID), } - id, ok := s.Flavor["id"].(string) + flavorId, ok := s.Flavor["id"].(string) if !ok { level.Warn(i.logger).Log("msg", "Invalid type for flavor id, expected string") continue } - labels[openstackLabelInstanceFlavor] = model.LabelValue(id) + labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorId) + + imageId, ok := s.Image["id"].(string) + if ok { + labels[openstackLabelInstanceImage] = model.LabelValue(imageId) + } + for k, v := range s.Metadata { name := strutil.SanitizeLabelName(k) labels[openstackLabelTagPrefix+model.LabelName(name)] = model.LabelValue(v) diff --git a/discovery/openstack/instance_test.go b/discovery/openstack/instance_test.go index d47cb0020e..d2da5d9681 100644 --- a/discovery/openstack/instance_test.go +++ b/discovery/openstack/instance_test.go @@ -73,6 +73,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { "__address__": model.LabelValue("10.0.0.32:0"), "__meta_openstack_instance_flavor": model.LabelValue("1"), "__meta_openstack_instance_id": model.LabelValue("ef079b0c-e610-4dfb-b1aa-b49f07ac48e5"), + "__meta_openstack_instance_image": model.LabelValue("f90f6034-2570-4974-8351-6b49732ef2eb"), "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), "__meta_openstack_instance_name": model.LabelValue("herp"), "__meta_openstack_private_ip": model.LabelValue("10.0.0.32"), @@ -85,6 +86,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) { "__address__": model.LabelValue("10.0.0.31:0"), "__meta_openstack_instance_flavor": model.LabelValue("1"), "__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682ba"), + "__meta_openstack_instance_image": model.LabelValue("f90f6034-2570-4974-8351-6b49732ef2eb"), "__meta_openstack_instance_status": model.LabelValue("ACTIVE"), "__meta_openstack_instance_name": model.LabelValue("derp"), "__meta_openstack_private_ip": 
model.LabelValue("10.0.0.31"), diff --git a/discovery/ovhcloud/dedicated_server.go b/discovery/ovhcloud/dedicated_server.go new file mode 100644 index 0000000000..bb5dadcd7b --- /dev/null +++ b/discovery/ovhcloud/dedicated_server.go @@ -0,0 +1,163 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ovhcloud + +import ( + "context" + "fmt" + "net/netip" + "net/url" + "path" + "strconv" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/ovh/go-ovh/ovh" + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/discovery/refresh" + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +const ( + dedicatedServerAPIPath = "/dedicated/server" + dedicatedServerLabelPrefix = metaLabelPrefix + "dedicated_server_" +) + +// dedicatedServer struct from API. Also contains IP addresses that are fetched +// independently. +type dedicatedServer struct { + State string `json:"state"` + ips []netip.Addr + CommercialRange string `json:"commercialRange"` + LinkSpeed int `json:"linkSpeed"` + Rack string `json:"rack"` + NoIntervention bool `json:"noIntervention"` + Os string `json:"os"` + SupportLevel string `json:"supportLevel"` + ServerID int64 `json:"serverId"` + Reverse string `json:"reverse"` + Datacenter string `json:"datacenter"` + Name string `json:"name"` +} + +type dedicatedServerDiscovery struct { + *refresh.Discovery + config *SDConfig + logger log.Logger +} + +func newDedicatedServerDiscovery(conf *SDConfig, logger log.Logger) *dedicatedServerDiscovery { + return &dedicatedServerDiscovery{config: conf, logger: logger} +} + +func getDedicatedServerList(client *ovh.Client) ([]string, error) { + var dedicatedListName []string + err := client.Get(dedicatedServerAPIPath, &dedicatedListName) + if err != nil { + return nil, err + } + + return dedicatedListName, nil +} + +func getDedicatedServerDetails(client *ovh.Client, serverName string) (*dedicatedServer, error) { + var dedicatedServerDetails dedicatedServer + err := client.Get(path.Join(dedicatedServerAPIPath, url.QueryEscape(serverName)), &dedicatedServerDetails) + if err != nil { + return nil, err + } + + var ips []string + err = client.Get(path.Join(dedicatedServerAPIPath, url.QueryEscape(serverName), "ips"), &ips) + if err != nil { + return nil, err + } + + parsedIPs, err := parseIPList(ips) + if err != nil { + return nil, err + } + + dedicatedServerDetails.ips = parsedIPs + return &dedicatedServerDetails, nil +} + +func (d *dedicatedServerDiscovery) getService() string { + return "dedicated_server" +} + +func (d *dedicatedServerDiscovery) getSource() string { + return fmt.Sprintf("%s_%s", d.config.Name(), d.getService()) +} + +func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) { + client, err := createClient(d.config) + if err != nil { + return nil, err + } + var dedicatedServerDetailedList []dedicatedServer + dedicatedServerList, err := getDedicatedServerList(client) + if err != nil { + return nil, err + 
} + for _, dedicatedServerName := range dedicatedServerList { + dedicatedServer, err := getDedicatedServerDetails(client, dedicatedServerName) + if err != nil { + err := level.Warn(d.logger).Log("msg", fmt.Sprintf("%s: Could not get details of %s", d.getSource(), dedicatedServerName), "err", err.Error()) + if err != nil { + return nil, err + } + continue + } + dedicatedServerDetailedList = append(dedicatedServerDetailedList, *dedicatedServer) + } + var targets []model.LabelSet + + for _, server := range dedicatedServerDetailedList { + var ipv4, ipv6 string + for _, ip := range server.ips { + if ip.Is4() { + ipv4 = ip.String() + } + if ip.Is6() { + ipv6 = ip.String() + } + } + defaultIP := ipv4 + if defaultIP == "" { + defaultIP = ipv6 + } + labels := model.LabelSet{ + model.AddressLabel: model.LabelValue(defaultIP), + model.InstanceLabel: model.LabelValue(server.Name), + dedicatedServerLabelPrefix + "state": model.LabelValue(server.State), + dedicatedServerLabelPrefix + "commercial_range": model.LabelValue(server.CommercialRange), + dedicatedServerLabelPrefix + "link_speed": model.LabelValue(fmt.Sprintf("%d", server.LinkSpeed)), + dedicatedServerLabelPrefix + "rack": model.LabelValue(server.Rack), + dedicatedServerLabelPrefix + "no_intervention": model.LabelValue(strconv.FormatBool(server.NoIntervention)), + dedicatedServerLabelPrefix + "os": model.LabelValue(server.Os), + dedicatedServerLabelPrefix + "support_level": model.LabelValue(server.SupportLevel), + dedicatedServerLabelPrefix + "server_id": model.LabelValue(fmt.Sprintf("%d", server.ServerID)), + dedicatedServerLabelPrefix + "reverse": model.LabelValue(server.Reverse), + dedicatedServerLabelPrefix + "datacenter": model.LabelValue(server.Datacenter), + dedicatedServerLabelPrefix + "name": model.LabelValue(server.Name), + dedicatedServerLabelPrefix + "ipv4": model.LabelValue(ipv4), + dedicatedServerLabelPrefix + "ipv6": model.LabelValue(ipv6), + } + targets = append(targets, labels) + } + + return []*targetgroup.Group{{Source: d.getSource(), Targets: targets}}, nil +} diff --git a/discovery/ovhcloud/dedicated_server_test.go b/discovery/ovhcloud/dedicated_server_test.go new file mode 100644 index 0000000000..e8ffa4a283 --- /dev/null +++ b/discovery/ovhcloud/dedicated_server_test.go @@ -0,0 +1,123 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
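A rough sketch of the single target group this refresher returns, with the shape inferred from getSource and the label building above and values mirroring the mock data in the test below:

// Illustrative only; not code from the patch.
tg := &targetgroup.Group{
	Source: "ovhcloud_dedicated_server", // d.config.Name() + "_" + d.getService()
	Targets: []model.LabelSet{{
		model.AddressLabel:  "1.2.3.4", // defaultIP prefers IPv4, falling back to IPv6
		model.InstanceLabel: "abcde",
		// ...plus the __meta_ovhcloud_dedicated_server_* labels built above.
	}},
}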
+ +package ovhcloud + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/go-kit/log" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" +) + +func TestOvhcloudDedicatedServerRefresh(t *testing.T) { + var cfg SDConfig + + mock := httptest.NewServer(http.HandlerFunc(MockDedicatedAPI)) + defer mock.Close() + cfgString := fmt.Sprintf(` +--- +service: dedicated_server +endpoint: %s +application_key: %s +application_secret: %s +consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecretTest, ovhcloudConsumerKeyTest) + + require.NoError(t, yaml.UnmarshalStrict([]byte(cfgString), &cfg)) + d, err := newRefresher(&cfg, log.NewNopLogger()) + require.NoError(t, err) + ctx := context.Background() + targetGroups, err := d.refresh(ctx) + require.NoError(t, err) + + require.Equal(t, 1, len(targetGroups)) + targetGroup := targetGroups[0] + require.NotNil(t, targetGroup) + require.NotNil(t, targetGroup.Targets) + require.Equal(t, 1, len(targetGroup.Targets)) + + for i, lbls := range []model.LabelSet{ + { + "__address__": "1.2.3.4", + "__meta_ovhcloud_dedicated_server_commercial_range": "Advance-1 Gen 2", + "__meta_ovhcloud_dedicated_server_datacenter": "gra3", + "__meta_ovhcloud_dedicated_server_ipv4": "1.2.3.4", + "__meta_ovhcloud_dedicated_server_ipv6": "", + "__meta_ovhcloud_dedicated_server_link_speed": "123", + "__meta_ovhcloud_dedicated_server_name": "abcde", + "__meta_ovhcloud_dedicated_server_no_intervention": "false", + "__meta_ovhcloud_dedicated_server_os": "debian11_64", + "__meta_ovhcloud_dedicated_server_rack": "TESTRACK", + "__meta_ovhcloud_dedicated_server_reverse": "abcde-rev", + "__meta_ovhcloud_dedicated_server_server_id": "1234", + "__meta_ovhcloud_dedicated_server_state": "test", + "__meta_ovhcloud_dedicated_server_support_level": "pro", + "instance": "abcde", + }, + } { + t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { + require.Equal(t, lbls, targetGroup.Targets[i]) + }) + } +} + +func MockDedicatedAPI(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("X-Ovh-Application") != ovhcloudApplicationKeyTest { + http.Error(w, "bad application key", http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/json") + if r.URL.Path == "/dedicated/server" { + dedicatedServersList, err := os.ReadFile("testdata/dedicated_server/dedicated_servers.json") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + _, err = w.Write(dedicatedServersList) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } + if r.URL.Path == "/dedicated/server/abcde" { + dedicatedServer, err := os.ReadFile("testdata/dedicated_server/dedicated_servers_details.json") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + _, err = w.Write(dedicatedServer) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } + if r.URL.Path == "/dedicated/server/abcde/ips" { + dedicatedServerIPs, err := os.ReadFile("testdata/dedicated_server/dedicated_servers_abcde_ips.json") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + _, err = w.Write(dedicatedServerIPs) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } +} diff --git a/discovery/ovhcloud/ovhcloud.go b/discovery/ovhcloud/ovhcloud.go new file mode 100644 index 0000000000..535ade4df5 
--- /dev/null +++ b/discovery/ovhcloud/ovhcloud.go @@ -0,0 +1,155 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ovhcloud + +import ( + "context" + "errors" + "fmt" + "net/netip" + "time" + + "github.com/go-kit/log" + "github.com/ovh/go-ovh/ovh" + "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/discovery" + "github.com/prometheus/prometheus/discovery/refresh" + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +// metaLabelPrefix is the meta prefix used for all meta labels in this discovery. +const metaLabelPrefix = model.MetaLabelPrefix + "ovhcloud_" + +type refresher interface { + refresh(context.Context) ([]*targetgroup.Group, error) +} + +var DefaultSDConfig = SDConfig{ + Endpoint: "ovh-eu", + RefreshInterval: model.Duration(60 * time.Second), +} + +// SDConfig defines the Service Discovery struct used for configuration. +type SDConfig struct { + Endpoint string `yaml:"endpoint"` + ApplicationKey string `yaml:"application_key"` + ApplicationSecret config.Secret `yaml:"application_secret"` + ConsumerKey config.Secret `yaml:"consumer_key"` + RefreshInterval model.Duration `yaml:"refresh_interval"` + Service string `yaml:"service"` +} + +// Name implements the Discoverer interface. +func (c SDConfig) Name() string { + return "ovhcloud" +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultSDConfig + type plain SDConfig + err := unmarshal((*plain)(c)) + if err != nil { + return err + } + + if c.Endpoint == "" { + return errors.New("endpoint cannot be empty") + } + if c.ApplicationKey == "" { + return errors.New("application key cannot be empty") + } + if c.ApplicationSecret == "" { + return errors.New("application secret cannot be empty") + } + if c.ConsumerKey == "" { + return errors.New("consumer key cannot be empty") + } + switch c.Service { + case "dedicated_server", "vps": + return nil + default: + return fmt.Errorf("unknown service: %v", c.Service) + } +} + +// createClient creates a new ovh client configured with given credentials. +func createClient(config *SDConfig) (*ovh.Client, error) { + return ovh.NewClient(config.Endpoint, config.ApplicationKey, string(config.ApplicationSecret), string(config.ConsumerKey)) +} + +// NewDiscoverer returns a Discoverer for the Config. +func (c *SDConfig) NewDiscoverer(options discovery.DiscovererOptions) (discovery.Discoverer, error) { + return NewDiscovery(c, options.Logger) +} + +func init() { + discovery.RegisterConfig(&SDConfig{}) +} + +// parseIPList parses an IP list, as the entries can have different formats.
+func parseIPList(ipList []string) ([]netip.Addr, error) { + var ipAddresses []netip.Addr + for _, ip := range ipList { + ipAddr, err := netip.ParseAddr(ip) + if err != nil { + ipPrefix, err := netip.ParsePrefix(ip) + if err != nil { + return nil, errors.New("could not parse IP addresses from list") + } + if ipPrefix.IsValid() { + netmask := ipPrefix.Bits() + if netmask != 32 { + continue + } + ipAddr = ipPrefix.Addr() + } + } + if ipAddr.IsValid() && !ipAddr.IsUnspecified() { + ipAddresses = append(ipAddresses, ipAddr) + } + } + + if len(ipAddresses) == 0 { + return nil, errors.New("could not parse IP addresses from list") + } + return ipAddresses, nil +} + +func newRefresher(conf *SDConfig, logger log.Logger) (refresher, error) { + switch conf.Service { + case "vps": + return newVpsDiscovery(conf, logger), nil + case "dedicated_server": + return newDedicatedServerDiscovery(conf, logger), nil + } + return nil, fmt.Errorf("unknown OVHcloud discovery service '%s'", conf.Service) +} + +// NewDiscovery returns a new OVHcloud Discoverer which periodically refreshes its targets. +func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) { + r, err := newRefresher(conf, logger) + if err != nil { + return nil, err + } + + return refresh.NewDiscovery( + logger, + "ovhcloud", + time.Duration(conf.RefreshInterval), + r.refresh, + ), nil +} diff --git a/discovery/ovhcloud/ovhcloud_test.go b/discovery/ovhcloud/ovhcloud_test.go new file mode 100644 index 0000000000..efcd95bb0d --- /dev/null +++ b/discovery/ovhcloud/ovhcloud_test.go @@ -0,0 +1,129 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
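Before the tests, a quick worked example of parseIPList's prefix handling, using inputs from the dedicated-server testdata:

ips, err := parseIPList([]string{"1.2.3.4/32", "2001:0db8:0000:0000:0000:0000:0000:0001/64"})
// err == nil and ips contains only 1.2.3.4: a /32 prefix is unwrapped to its
// address, while any other mask (here /64) is skipped, which is why the
// dedicated-server test expects an empty ipv6 label.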
+ +package ovhcloud + +import ( + "errors" + "fmt" + "testing" + + "github.com/prometheus/common/config" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" + + "github.com/prometheus/prometheus/discovery" + "github.com/prometheus/prometheus/util/testutil" +) + +var ( + ovhcloudApplicationKeyTest = "TDPKJdwZwAQPwKX2" + ovhcloudApplicationSecretTest = config.Secret("9ufkBmLaTQ9nz5yMUlg79taH0GNnzDjk") + ovhcloudConsumerKeyTest = config.Secret("5mBuy6SUQcRw2ZUxg0cG68BoDKpED4KY") +) + +const ( + mockURL = "https://localhost:1234" +) + +func getMockConf(service string) (SDConfig, error) { + confString := fmt.Sprintf(` +endpoint: %s +application_key: %s +application_secret: %s +consumer_key: %s +refresh_interval: 1m +service: %s +`, mockURL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecretTest, ovhcloudConsumerKeyTest, service) + + return getMockConfFromString(confString) +} + +func getMockConfFromString(confString string) (SDConfig, error) { + var conf SDConfig + err := yaml.UnmarshalStrict([]byte(confString), &conf) + return conf, err +} + +func TestErrorInitClient(t *testing.T) { + confString := fmt.Sprintf(` +endpoint: %s + +`, mockURL) + + conf, _ := getMockConfFromString(confString) + + _, err := createClient(&conf) + + require.ErrorContains(t, err, "missing application key") +} + +func TestParseIPs(t *testing.T) { + testCases := []struct { + name string + input []string + want error + }{ + { + name: "Parse IPv4 failed.", + input: []string{"A.b"}, + want: errors.New("could not parse IP addresses from list"), + }, + { + name: "Parse unspecified failed.", + input: []string{"0.0.0.0"}, + want: errors.New("could not parse IP addresses from list"), + }, + { + name: "Parse void IP failed.", + input: []string{""}, + want: errors.New("could not parse IP addresses from list"), + }, + { + name: "Parse IPv6 ok.", + input: []string{"2001:0db8:0000:0000:0000:0000:0000:0001"}, + want: nil, + }, + { + name: "Parse IPv6 failed.", + input: []string{"bbb:cccc:1111"}, + want: errors.New("could not parse IP addresses from list"), + }, + { + name: "Parse IPv4 bad mask.", + input: []string{"192.0.2.1/23"}, + want: errors.New("could not parse IP addresses from list"), + }, + { + name: "Parse IPv4 ok.", + input: []string{"192.0.2.1/32"}, + want: nil, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, err := parseIPList(tc.input) + require.Equal(t, tc.want, err) + }) + } +} + +func TestDiscoverer(t *testing.T) { + conf, _ := getMockConf("vps") + logger := testutil.NewLogger(t) + _, err := conf.NewDiscoverer(discovery.DiscovererOptions{ + Logger: logger, + }) + + require.NoError(t, err) +} diff --git a/discovery/ovhcloud/testdata/dedicated_server/dedicated_servers.json b/discovery/ovhcloud/testdata/dedicated_server/dedicated_servers.json new file mode 100644 index 0000000000..a250e96051 --- /dev/null +++ b/discovery/ovhcloud/testdata/dedicated_server/dedicated_servers.json @@ -0,0 +1,3 @@ +[ + "abcde" +] \ No newline at end of file diff --git a/discovery/ovhcloud/testdata/dedicated_server/dedicated_servers_abcde_ips.json b/discovery/ovhcloud/testdata/dedicated_server/dedicated_servers_abcde_ips.json new file mode 100644 index 0000000000..a1b949c975 --- /dev/null +++ b/discovery/ovhcloud/testdata/dedicated_server/dedicated_servers_abcde_ips.json @@ -0,0 +1,4 @@ +[ + "1.2.3.4/32", + "2001:0db8:0000:0000:0000:0000:0000:0001/64" +] \ No newline at end of file diff --git a/discovery/ovhcloud/testdata/dedicated_server/dedicated_servers_details.json 
b/discovery/ovhcloud/testdata/dedicated_server/dedicated_servers_details.json new file mode 100644 index 0000000000..68f3e3b5c9 --- /dev/null +++ b/discovery/ovhcloud/testdata/dedicated_server/dedicated_servers_details.json @@ -0,0 +1,20 @@ +{ + "ip": "1.2.3.4", + "newUpgradeSystem": true, + "commercialRange": "Advance-1 Gen 2", + "rack": "TESTRACK", + "rescueMail": null, + "supportLevel": "pro", + "bootId": 1, + "linkSpeed": 123, + "professionalUse": false, + "monitoring": true, + "noIntervention": false, + "name": "abcde", + "rootDevice": null, + "state": "test", + "datacenter": "gra3", + "os": "debian11_64", + "reverse": "abcde-rev", + "serverId": 1234 +} \ No newline at end of file diff --git a/discovery/ovhcloud/testdata/vps/vps.json b/discovery/ovhcloud/testdata/vps/vps.json new file mode 100644 index 0000000000..18147865ba --- /dev/null +++ b/discovery/ovhcloud/testdata/vps/vps.json @@ -0,0 +1,3 @@ +[ + "abc" +] \ No newline at end of file diff --git a/discovery/ovhcloud/testdata/vps/vps_abc_ips.json b/discovery/ovhcloud/testdata/vps/vps_abc_ips.json new file mode 100644 index 0000000000..3c871d0933 --- /dev/null +++ b/discovery/ovhcloud/testdata/vps/vps_abc_ips.json @@ -0,0 +1,4 @@ +[ + "192.0.2.1/32", + "2001:0db1:0000:0000:0000:0000:0000:0001/64" +] \ No newline at end of file diff --git a/discovery/ovhcloud/testdata/vps/vps_details.json b/discovery/ovhcloud/testdata/vps/vps_details.json new file mode 100644 index 0000000000..f2bb2e803c --- /dev/null +++ b/discovery/ovhcloud/testdata/vps/vps_details.json @@ -0,0 +1,25 @@ +{ + "offerType": "ssd", + "monitoringIpBlocks": [], + "displayName": "abc", + "zone": "zone", + "cluster": "cluster_test", + "slaMonitoring": false, + "name": "abc", + "vcore": 1, + "state": "running", + "keymap": null, + "netbootMode": "local", + "model": { + "name": "vps-value-1-2-40", + "availableOptions": [], + "maximumAdditionnalIp": 16, + "offer": "VPS abc", + "disk": 40, + "version": "2019v1", + "vcore": 1, + "memory": 2048, + "datacenter": [] + }, + "memoryLimit": 2048 +} \ No newline at end of file diff --git a/discovery/ovhcloud/vps.go b/discovery/ovhcloud/vps.go new file mode 100644 index 0000000000..e2d1dee364 --- /dev/null +++ b/discovery/ovhcloud/vps.go @@ -0,0 +1,187 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ovhcloud + +import ( + "context" + "fmt" + "net/netip" + "net/url" + "path" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/ovh/go-ovh/ovh" + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/discovery/refresh" + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +const ( + vpsAPIPath = "/vps" + vpsLabelPrefix = metaLabelPrefix + "vps_" +) + +// Model struct from API. 
+type vpsModel struct { + MaximumAdditionalIP int `json:"maximumAdditionnalIp"` + Offer string `json:"offer"` + Datacenter []string `json:"datacenter"` + Vcore int `json:"vcore"` + Version string `json:"version"` + Name string `json:"name"` + Disk int `json:"disk"` + Memory int `json:"memory"` +} + +// VPS struct from API. Also contains IP addresses that are fetched +// independently. +type virtualPrivateServer struct { + ips []netip.Addr + Keymap []string `json:"keymap"` + Zone string `json:"zone"` + Model vpsModel `json:"model"` + DisplayName string `json:"displayName"` + MonitoringIPBlocks []string `json:"monitoringIpBlocks"` + Cluster string `json:"cluster"` + State string `json:"state"` + Name string `json:"name"` + NetbootMode string `json:"netbootMode"` + MemoryLimit int `json:"memoryLimit"` + OfferType string `json:"offerType"` + Vcore int `json:"vcore"` +} + +type vpsDiscovery struct { + *refresh.Discovery + config *SDConfig + logger log.Logger +} + +func newVpsDiscovery(conf *SDConfig, logger log.Logger) *vpsDiscovery { + return &vpsDiscovery{config: conf, logger: logger} +} + +func getVpsDetails(client *ovh.Client, vpsName string) (*virtualPrivateServer, error) { + var vpsDetails virtualPrivateServer + vpsNamePath := path.Join(vpsAPIPath, url.QueryEscape(vpsName)) + + err := client.Get(vpsNamePath, &vpsDetails) + if err != nil { + return nil, err + } + + var ips []string + err = client.Get(path.Join(vpsNamePath, "ips"), &ips) + if err != nil { + return nil, err + } + + parsedIPs, err := parseIPList(ips) + if err != nil { + return nil, err + } + vpsDetails.ips = parsedIPs + + return &vpsDetails, nil +} + +func getVpsList(client *ovh.Client) ([]string, error) { + var vpsListName []string + + err := client.Get(vpsAPIPath, &vpsListName) + if err != nil { + return nil, err + } + + return vpsListName, nil +} + +func (d *vpsDiscovery) getService() string { + return "vps" +} + +func (d *vpsDiscovery) getSource() string { + return fmt.Sprintf("%s_%s", d.config.Name(), d.getService()) +} + +func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) { + client, err := createClient(d.config) + if err != nil { + return nil, err + } + + var vpsDetailedList []virtualPrivateServer + vpsList, err := getVpsList(client) + if err != nil { + return nil, err + } + + for _, vpsName := range vpsList { + vpsDetailed, err := getVpsDetails(client, vpsName) + if err != nil { + err := level.Warn(d.logger).Log("msg", fmt.Sprintf("%s: Could not get details of %s", d.getSource(), vpsName), "err", err.Error()) + if err != nil { + return nil, err + } + continue + } + vpsDetailedList = append(vpsDetailedList, *vpsDetailed) + } + + var targets []model.LabelSet + for _, server := range vpsDetailedList { + var ipv4, ipv6 string + for _, ip := range server.ips { + if ip.Is4() { + ipv4 = ip.String() + } + if ip.Is6() { + ipv6 = ip.String() + } + } + defaultIP := ipv4 + if defaultIP == "" { + defaultIP = ipv6 + } + labels := model.LabelSet{ + model.AddressLabel: model.LabelValue(defaultIP), + model.InstanceLabel: model.LabelValue(server.Name), + vpsLabelPrefix + "offer": model.LabelValue(server.Model.Offer), + vpsLabelPrefix + "datacenter": model.LabelValue(fmt.Sprintf("%+v", server.Model.Datacenter)), + vpsLabelPrefix + "model_vcore": model.LabelValue(fmt.Sprintf("%d", server.Model.Vcore)), + vpsLabelPrefix + "maximum_additional_ip": model.LabelValue(fmt.Sprintf("%d", server.Model.MaximumAdditionalIP)), + vpsLabelPrefix + "version": model.LabelValue(server.Model.Version), + vpsLabelPrefix + 
"model_name": model.LabelValue(server.Model.Name), + vpsLabelPrefix + "disk": model.LabelValue(fmt.Sprintf("%d", server.Model.Disk)), + vpsLabelPrefix + "memory": model.LabelValue(fmt.Sprintf("%d", server.Model.Memory)), + vpsLabelPrefix + "zone": model.LabelValue(server.Zone), + vpsLabelPrefix + "display_name": model.LabelValue(server.DisplayName), + vpsLabelPrefix + "cluster": model.LabelValue(server.Cluster), + vpsLabelPrefix + "state": model.LabelValue(server.State), + vpsLabelPrefix + "name": model.LabelValue(server.Name), + vpsLabelPrefix + "netboot_mode": model.LabelValue(server.NetbootMode), + vpsLabelPrefix + "memory_limit": model.LabelValue(fmt.Sprintf("%d", server.MemoryLimit)), + vpsLabelPrefix + "offer_type": model.LabelValue(server.OfferType), + vpsLabelPrefix + "vcore": model.LabelValue(fmt.Sprintf("%d", server.Vcore)), + vpsLabelPrefix + "ipv4": model.LabelValue(ipv4), + vpsLabelPrefix + "ipv6": model.LabelValue(ipv6), + } + + targets = append(targets, labels) + } + + return []*targetgroup.Group{{Source: d.getSource(), Targets: targets}}, nil +} diff --git a/discovery/ovhcloud/vps_test.go b/discovery/ovhcloud/vps_test.go new file mode 100644 index 0000000000..b1177f215e --- /dev/null +++ b/discovery/ovhcloud/vps_test.go @@ -0,0 +1,130 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package ovhcloud
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"testing"
+
+	yaml "gopkg.in/yaml.v2"
+
+	"github.com/go-kit/log"
+	"github.com/prometheus/common/model"
+	"github.com/stretchr/testify/require"
+)
+
+func TestOvhCloudVpsRefresh(t *testing.T) {
+	var cfg SDConfig
+
+	mock := httptest.NewServer(http.HandlerFunc(MockVpsAPI))
+	defer mock.Close()
+	cfgString := fmt.Sprintf(`
+---
+service: vps
+endpoint: %s
+application_key: %s
+application_secret: %s
+consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecretTest, ovhcloudConsumerKeyTest)
+
+	require.NoError(t, yaml.UnmarshalStrict([]byte(cfgString), &cfg))
+
+	d, err := newRefresher(&cfg, log.NewNopLogger())
+	require.NoError(t, err)
+	ctx := context.Background()
+	targetGroups, err := d.refresh(ctx)
+	require.NoError(t, err)
+
+	require.Equal(t, 1, len(targetGroups))
+	targetGroup := targetGroups[0]
+	require.NotNil(t, targetGroup)
+	require.NotNil(t, targetGroup.Targets)
+	require.Equal(t, 1, len(targetGroup.Targets))
+	for i, lbls := range []model.LabelSet{
+		{
+			"__address__":                               "192.0.2.1",
+			"__meta_ovhcloud_vps_ipv4":                  "192.0.2.1",
+			"__meta_ovhcloud_vps_ipv6":                  "",
+			"__meta_ovhcloud_vps_cluster":               "cluster_test",
+			"__meta_ovhcloud_vps_datacenter":            "[]",
+			"__meta_ovhcloud_vps_disk":                  "40",
+			"__meta_ovhcloud_vps_display_name":          "abc",
+			"__meta_ovhcloud_vps_maximum_additional_ip": "16",
+			"__meta_ovhcloud_vps_memory":                "2048",
+			"__meta_ovhcloud_vps_memory_limit":          "2048",
+			"__meta_ovhcloud_vps_model_name":            "vps-value-1-2-40",
+			"__meta_ovhcloud_vps_name":                  "abc",
+			"__meta_ovhcloud_vps_netboot_mode":          "local",
+			"__meta_ovhcloud_vps_offer":                 "VPS abc",
+			"__meta_ovhcloud_vps_offer_type":            "ssd",
+			"__meta_ovhcloud_vps_state":                 "running",
+			"__meta_ovhcloud_vps_vcore":                 "1",
+			"__meta_ovhcloud_vps_model_vcore":           "1",
+			"__meta_ovhcloud_vps_version":               "2019v1",
+			"__meta_ovhcloud_vps_zone":                  "zone",
+			"instance":                                  "abc",
+		},
+	} {
+		t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
+			require.Equal(t, lbls, targetGroup.Targets[i])
+		})
+	}
+}
+
+// MockVpsAPI mocks the OVH REST API endpoints used by the VPS discovery.
+func MockVpsAPI(w http.ResponseWriter, r *http.Request) {
+	if r.Header.Get("X-Ovh-Application") != ovhcloudApplicationKeyTest {
+		http.Error(w, "bad application key", http.StatusBadRequest)
+		return
+	}
+	w.Header().Set("Content-Type", "application/json")
+	if r.URL.Path == "/vps" {
+		vpsList, err := os.ReadFile("testdata/vps/vps.json")
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+		_, err = w.Write(vpsList)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+	}
+	if r.URL.Path == "/vps/abc" {
+		vpsDetails, err := os.ReadFile("testdata/vps/vps_details.json")
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+		_, err = w.Write(vpsDetails)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+	}
+	if r.URL.Path == "/vps/abc/ips" {
+		vpsIPs, err := os.ReadFile("testdata/vps/vps_abc_ips.json")
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+		_, err = w.Write(vpsIPs)
+		if err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+			return
+		}
+	}
+}
diff --git a/discovery/refresh/refresh.go b/discovery/refresh/refresh.go
index 043b2f7ba8..919567a53b 100644
--- a/discovery/refresh/refresh.go
+++ b/discovery/refresh/refresh.go
@@ -114,7 +114,10 @@ func (d
*Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { now := time.Now() - defer d.duration.Observe(time.Since(now).Seconds()) + defer func() { + d.duration.Observe(time.Since(now).Seconds()) + }() + tgs, err := d.refreshf(ctx) if err != nil { d.failures.Inc() diff --git a/discovery/registry.go b/discovery/registry.go index 8274628c23..13168a07a7 100644 --- a/discovery/registry.go +++ b/discovery/registry.go @@ -253,7 +253,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error { oldStr := oldTyp.String() newStr := newTyp.String() for i, s := range e.Errors { - e.Errors[i] = strings.Replace(s, oldStr, newStr, -1) + e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr) } } return err diff --git a/discovery/vultr/vultr.go b/discovery/vultr/vultr.go index 2f489e7d45..42881d3c19 100644 --- a/discovery/vultr/vultr.go +++ b/discovery/vultr/vultr.go @@ -202,10 +202,8 @@ func (d *Discovery) listInstances(ctx context.Context) ([]govultr.Instance, erro if meta.Links.Next == "" { break - } else { - listOptions.Cursor = meta.Links.Next - continue } + listOptions.Cursor = meta.Links.Next } return instances, nil diff --git a/discovery/xds/xds_test.go b/discovery/xds/xds_test.go index b6c141e283..974a47342f 100644 --- a/discovery/xds/xds_test.go +++ b/discovery/xds/xds_test.go @@ -74,16 +74,16 @@ func createTestHTTPServer(t *testing.T, responder discoveryResponder) *httptest. discoveryRes, err := responder(discoveryReq) if err != nil { - w.WriteHeader(500) + w.WriteHeader(http.StatusInternalServerError) return } if discoveryRes == nil { - w.WriteHeader(304) + w.WriteHeader(http.StatusNotModified) return } - w.WriteHeader(200) + w.WriteHeader(http.StatusOK) data, err := protoJSONMarshalOptions.Marshal(discoveryRes) require.NoError(t, err) diff --git a/discovery/zookeeper/zookeeper.go b/discovery/zookeeper/zookeeper.go index 308d63a5fc..cadff5fd2e 100644 --- a/discovery/zookeeper/zookeeper.go +++ b/discovery/zookeeper/zookeeper.go @@ -193,7 +193,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } for _, pathUpdate := range d.pathUpdates { // Drain event channel in case the treecache leaks goroutines otherwise. - for range pathUpdate { + for range pathUpdate { // nolint:revive } } d.conn.Close() diff --git a/docs/command-line/index.md b/docs/command-line/index.md new file mode 100644 index 0000000000..53786fbb20 --- /dev/null +++ b/docs/command-line/index.md @@ -0,0 +1,4 @@ +--- +title: Command Line +sort_rank: 9 +--- diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md new file mode 100644 index 0000000000..46f286e009 --- /dev/null +++ b/docs/command-line/prometheus.md @@ -0,0 +1,59 @@ +--- +title: prometheus +--- + +# prometheus + +The Prometheus monitoring server + + + +## Flags + +| Flag | Description | Default | +| --- | --- | --- | +| -h, --help | Show context-sensitive help (also try --help-long and --help-man). | | +| --version | Show application version. | | +| --config.file | Prometheus configuration file path. | `prometheus.yml` | +| --web.listen-address | Address to listen on for UI, API, and telemetry. | `0.0.0.0:9090` | +| --web.config.file | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. | | +| --web.read-timeout | Maximum duration before timing out read of the request, and closing idle connections. | `5m` | +| --web.max-connections | Maximum number of simultaneous connections. 
| `512` |
+| --web.external-url | The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically. | |
+| --web.route-prefix | Prefix for the internal routes of web endpoints. Defaults to path of --web.external-url. | |
+| --web.user-assets | Path to static asset directory, available at /user. | |
+| --web.enable-lifecycle | Enable shutdown and reload via HTTP request. | `false` |
+| --web.enable-admin-api | Enable API endpoints for admin control actions. | `false` |
+| --web.enable-remote-write-receiver | Enable API endpoint accepting remote write requests. | `false` |
+| --web.console.templates | Path to the console template directory, available at /consoles. | `consoles` |
+| --web.console.libraries | Path to the console library directory. | `console_libraries` |
+| --web.page-title | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` |
+| --web.cors.origin | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1\|domain2)\.com' | `.*` |
+| --storage.tsdb.path | Base path for metrics storage. Use with server mode only. | `data/` |
+| --storage.tsdb.retention | [DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use "storage.tsdb.retention.time" instead. Use with server mode only. | |
+| --storage.tsdb.retention.time | How long to retain samples in storage. When this flag is set it overrides "storage.tsdb.retention". If neither this flag nor "storage.tsdb.retention" nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. | |
+| --storage.tsdb.retention.size | Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Use with server mode only. | |
+| --storage.tsdb.no-lockfile | Do not create lockfile in data directory. Use with server mode only. | `false` |
+| --storage.tsdb.head-chunks-write-queue-size | Size of the queue through which head chunks are written to the disk to be m-mapped; 0 disables the queue completely. Experimental. Use with server mode only. | `0` |
+| --storage.agent.path | Base path for metrics storage. Use with agent mode only. | `data-agent/` |
+| --storage.agent.wal-compression | Compress the agent WAL. Use with agent mode only. | `true` |
+| --storage.agent.retention.min-time | Minimum age samples may be before being considered for deletion when the WAL is truncated. Use with agent mode only. | |
+| --storage.agent.retention.max-time | Maximum age samples may be before being forcibly deleted when the WAL is truncated. Use with agent mode only. | |
+| --storage.agent.no-lockfile | Do not create lockfile in data directory. Use with agent mode only. | `false` |
+| --storage.remote.flush-deadline | How long to wait for flushing samples on shutdown or config reload. | `1m` |
+| --storage.remote.read-sample-limit | Maximum overall number of samples to return via the remote read interface, in a single query. 0 means no limit. This limit is ignored for streamed response types. Use with server mode only.
| `5e7` |
+| --storage.remote.read-concurrent-limit | Maximum number of concurrent remote read calls. 0 means no limit. Use with server mode only. | `10` |
+| --storage.remote.read-max-bytes-in-frame | Maximum number of bytes in a single frame for streaming remote read response types before marshalling. Note that the client might have a limit on frame size as well. 1MB as recommended by protobuf by default. Use with server mode only. | `1048576` |
+| --rules.alert.for-outage-tolerance | Max time to tolerate a Prometheus outage for restoring the "for" state of alerts. Use with server mode only. | `1h` |
+| --rules.alert.for-grace-period | Minimum duration between alert and restored "for" state. This is maintained only for alerts with a configured "for" time greater than the grace period. Use with server mode only. | `10m` |
+| --rules.alert.resend-delay | Minimum amount of time to wait before resending an alert to Alertmanager. Use with server mode only. | `1m` |
+| --alertmanager.notification-queue-capacity | The capacity of the queue for pending Alertmanager notifications. Use with server mode only. | `10000` |
+| --query.lookback-delta | The maximum lookback duration for retrieving metrics during expression evaluations and federation. Use with server mode only. | `5m` |
+| --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
+| --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
+| --query.max-samples | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
+| --enable-feature | Comma-separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
+| --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
+| --log.format | Output format of log messages. One of: [logfmt, json] | `logfmt` |
+
+
diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md
new file mode 100644
index 0000000000..673e8c0481
--- /dev/null
+++ b/docs/command-line/promtool.md
@@ -0,0 +1,611 @@
+---
+title: promtool
+---
+
+# promtool
+
+Tooling for the Prometheus monitoring system.
+
+
+
+## Flags
+
+| Flag | Description |
+| --- | --- |
+| -h, --help | Show context-sensitive help (also try --help-long and --help-man). |
+| --version | Show application version. |
+| --enable-feature | Comma-separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details. |
+
+
+
+
+## Commands
+
+| Command | Description |
+| --- | --- |
+| help | Show help. |
+| check | Check the resources for validity. |
+| query | Run queries against a Prometheus server. |
+| debug | Fetch debug information. |
+| push | Push to a Prometheus server. |
+| test | Unit testing. |
+| tsdb | Run tsdb commands. |
+
+
+
+
+### `promtool help`
+
+Show help.
+
+
+
+#### Arguments
+
+| Argument | Description |
+| --- | --- |
+| command | Show help on command. |
+
+
+
+
+### `promtool check`
+
+Check the resources for validity.
+
+
+
+#### Flags
+
+| Flag | Description |
+| --- | --- |
+| --extended | Print extended information related to the cardinality of the metrics. |
+
+
+
+
+##### `promtool check service-discovery`
+
+Perform service discovery for the given job name and report the results, including relabeling.
+
+
+
+###### Flags
+
+| Flag | Description | Default |
+| --- | --- | --- |
+| --timeout | The time to wait for discovery results. | `30s` |
+
+
+
+
+###### Arguments
+
+| Argument | Description | Required |
+| --- | --- | --- |
+| config-file | The Prometheus config file. | Yes |
+| job | The job to run service discovery for. | Yes |
+
+
+
+
+##### `promtool check config`
+
+Check if the config files are valid.
+
+
+
+###### Flags
+
+| Flag | Description | Default |
+| --- | --- | --- |
+| --syntax-only | Only check the config file syntax, ignoring file and content validation referenced in the config. | |
+| --lint | Linting checks to apply to the rules specified in the config. Available options are: all, duplicate-rules, none. Use --lint=none to disable linting. | `duplicate-rules` |
+| --lint-fatal | Make lint errors exit with exit code 3. | `false` |
+| --agent | Check config file for Prometheus in Agent mode. | |
+
+
+
+
+###### Arguments
+
+| Argument | Description | Required |
+| --- | --- | --- |
+| config-files | The config files to check. | Yes |
+
+
+
+
+##### `promtool check web-config`
+
+Check if the web config files are valid.
+
+
+
+###### Arguments
+
+| Argument | Description | Required |
+| --- | --- | --- |
+| web-config-files | The config files to check. | Yes |
+
+
+
+
+##### `promtool check healthy`
+
+Check if the Prometheus server is healthy.
+
+
+
+###### Flags
+
+| Flag | Description | Default |
+| --- | --- | --- |
+| --http.config.file | HTTP client configuration file for promtool to connect to Prometheus. | |
+| --url | The URL for the Prometheus server. | `http://localhost:9090` |
+
+
+
+
+##### `promtool check ready`
+
+Check if the Prometheus server is ready.
+
+
+
+###### Flags
+
+| Flag | Description | Default |
+| --- | --- | --- |
+| --http.config.file | HTTP client configuration file for promtool to connect to Prometheus. | |
+| --url | The URL for the Prometheus server. | `http://localhost:9090` |
+
+
+
+
+##### `promtool check rules`
+
+Check if the rule files are valid.
+
+
+
+###### Flags
+
+| Flag | Description | Default |
+| --- | --- | --- |
+| --lint | Linting checks to apply. Available options are: all, duplicate-rules, none. Use --lint=none to disable linting. | `duplicate-rules` |
+| --lint-fatal | Make lint errors exit with exit code 3. | `false` |
+
+
+
+
+###### Arguments
+
+| Argument | Description |
+| --- | --- |
+| rule-files | The rule files to check, default is read from standard input. |
+
+
+
+
+##### `promtool check metrics`
+
+Pass Prometheus metrics over stdin to lint them for consistency and correctness.
+
+examples:
+
+$ cat metrics.prom | promtool check metrics
+
+$ curl -s http://localhost:9090/metrics | promtool check metrics
+
+
+
+### `promtool query`
+
+Run queries against a Prometheus server.
+
+
+
+#### Flags
+
+| Flag | Description | Default |
+| --- | --- | --- |
+| -o, --format | Output format of the query. | `promql` |
+| --http.config.file | HTTP client configuration file for promtool to connect to Prometheus.
| |
+
+
+
+
+##### `promtool query instant`
+
+Run instant query.
+
+
+
+###### Flags
+
+| Flag | Description |
+| --- | --- |
+| --time | Query evaluation time (RFC3339 or Unix timestamp). |
+
+
+
+
+###### Arguments
+
+| Argument | Description | Required |
+| --- | --- | --- |
+| server | Prometheus server to query. | Yes |
+| expr | PromQL query expression. | Yes |
+
+
+
+
+##### `promtool query range`
+
+Run range query.
+
+
+
+###### Flags
+
+| Flag | Description |
+| --- | --- |
+| --header | Extra headers to send to server. |
+| --start | Query range start time (RFC3339 or Unix timestamp). |
+| --end | Query range end time (RFC3339 or Unix timestamp). |
+| --step | Query step size (duration). |
+
+
+
+
+###### Arguments
+
+| Argument | Description | Required |
+| --- | --- | --- |
+| server | Prometheus server to query. | Yes |
+| expr | PromQL query expression. | Yes |
+
+
+
+
+##### `promtool query series`
+
+Run series query.
+
+
+
+###### Flags
+
+| Flag | Description |
+| --- | --- |
+| --match | Series selector. Can be specified multiple times. |
+| --start | Start time (RFC3339 or Unix timestamp). |
+| --end | End time (RFC3339 or Unix timestamp). |
+
+
+
+
+###### Arguments
+
+| Argument | Description | Required |
+| --- | --- | --- |
+| server | Prometheus server to query. | Yes |
+
+
+
+
+##### `promtool query labels`
+
+Run labels query.
+
+
+
+###### Flags
+
+| Flag | Description |
+| --- | --- |
+| --start | Start time (RFC3339 or Unix timestamp). |
+| --end | End time (RFC3339 or Unix timestamp). |
+| --match | Series selector. Can be specified multiple times. |
+
+
+
+
+###### Arguments
+
+| Argument | Description | Required |
+| --- | --- | --- |
+| server | Prometheus server to query. | Yes |
+| name | Label name to provide label values for. | Yes |
+
+
+
+
+### `promtool debug`
+
+Fetch debug information.
+
+
+
+##### `promtool debug pprof`
+
+Fetch profiling debug information.
+
+
+
+###### Arguments
+
+| Argument | Description | Required |
+| --- | --- | --- |
+| server | Prometheus server to get pprof files from. | Yes |
+
+
+
+
+##### `promtool debug metrics`
+
+Fetch metrics debug information.
+
+
+
+###### Arguments
+
+| Argument | Description | Required |
+| --- | --- | --- |
+| server | Prometheus server to get metrics from. | Yes |
+
+
+
+
+##### `promtool debug all`
+
+Fetch all debug information.
+
+
+
+###### Arguments
+
+| Argument | Description | Required |
+| --- | --- | --- |
+| server | Prometheus server to get all debug information from. | Yes |
+
+
+
+
+### `promtool push`
+
+Push to a Prometheus server.
+
+
+
+#### Flags
+
+| Flag | Description |
+| --- | --- |
+| --http.config.file | HTTP client configuration file for promtool to connect to Prometheus. |
+
+
+
+
+##### `promtool push metrics`
+
+Push metrics to a Prometheus remote write endpoint (for testing purposes only).
+
+
+
+###### Flags
+
+| Flag | Description | Default |
+| --- | --- | --- |
+| --label | Label to attach to metrics. Can be specified multiple times. | `job=promtool` |
+| --timeout | The time to wait for pushing metrics. | `30s` |
+| --header | Prometheus remote write header. | |
+
+
+
+
+###### Arguments
+
+| Argument | Description | Required |
+| --- | --- | --- |
+| remote-write-url | Prometheus remote write URL to push metrics. | Yes |
+| metric-files | The metric files to push, default is read from standard input. | |
+
+
+
+
+### `promtool test`
+
+Unit testing.
+
+
+
+##### `promtool test rules`
+
+Unit tests for rules.
+ + + +###### Arguments + +| Argument | Description | Required | +| --- | --- | --- | +| test-rule-file | The unit test file. | Yes | + + + + +### `promtool tsdb` + +Run tsdb commands. + + + +##### `promtool tsdb bench` + +Run benchmarks. + + + +##### `promtool tsdb bench write` + +Run a write performance benchmark. + + + +###### Flags + +| Flag | Description | Default | +| --- | --- | --- | +| --out | Set the output path. | `benchout` | +| --metrics | Number of metrics to read. | `10000` | +| --scrapes | Number of scrapes to simulate. | `3000` | + + + + +###### Arguments + +| Argument | Description | Default | +| --- | --- | --- | +| file | Input file with samples data, default is (../../tsdb/testdata/20kseries.json). | `../../tsdb/testdata/20kseries.json` | + + + + +##### `promtool tsdb analyze` + +Analyze churn, label pair cardinality and compaction efficiency. + + + +###### Flags + +| Flag | Description | Default | +| --- | --- | --- | +| --limit | How many items to show in each list. | `20` | +| --extended | Run extended analysis. | | + + + + +###### Arguments + +| Argument | Description | Default | +| --- | --- | --- | +| db path | Database path (default is data/). | `data/` | +| block id | Block to analyze (default is the last block). | | + + + + +##### `promtool tsdb list` + +List tsdb blocks. + + + +###### Flags + +| Flag | Description | +| --- | --- | +| -r, --human-readable | Print human readable values. | + + + + +###### Arguments + +| Argument | Description | Default | +| --- | --- | --- | +| db path | Database path (default is data/). | `data/` | + + + + +##### `promtool tsdb dump` + +Dump samples from a TSDB. + + + +###### Flags + +| Flag | Description | Default | +| --- | --- | --- | +| --min-time | Minimum timestamp to dump. | `-9223372036854775808` | +| --max-time | Maximum timestamp to dump. | `9223372036854775807` | +| --match | Series selector. | `{__name__=~'(?s:.*)'}` | + + + + +###### Arguments + +| Argument | Description | Default | +| --- | --- | --- | +| db path | Database path (default is data/). | `data/` | + + + + +##### `promtool tsdb create-blocks-from` + +[Experimental] Import samples from input and produce TSDB blocks. Please refer to the storage docs for more details. + + + +###### Flags + +| Flag | Description | +| --- | --- | +| -r, --human-readable | Print human readable values. | +| -q, --quiet | Do not print created blocks. | + + + + +##### `promtool tsdb create-blocks-from openmetrics` + +Import samples from OpenMetrics input and produce TSDB blocks. Please refer to the storage docs for more details. + + + +###### Arguments + +| Argument | Description | Default | Required | +| --- | --- | --- | --- | +| input file | OpenMetrics file to read samples from. | | Yes | +| output directory | Output directory for generated blocks. | `data/` | | + + + + +##### `promtool tsdb create-blocks-from rules` + +Create blocks of data for new recording rules. + + + +###### Flags + +| Flag | Description | Default | +| --- | --- | --- | +| --http.config.file | HTTP client configuration file for promtool to connect to Prometheus. | | +| --url | The URL for the Prometheus API with the data where the rule will be backfilled from. | `http://localhost:9090` | +| --start | The time to start backfilling the new rule from. Must be a RFC3339 formatted date or Unix timestamp. Required. | | +| --end | If an end time is provided, all recording rules in the rule files provided will be backfilled to the end time. Default will backfill up to 3 hours ago. 
Must be a RFC3339 formatted date or Unix timestamp. | |
+| --output-dir | Output directory for generated blocks. | `data/` |
+| --eval-interval | How frequently to evaluate rules when backfilling if a value is not set in the recording rule files. | `60s` |
+
+
+
+
+###### Arguments
+
+| Argument | Description | Required |
+| --- | --- | --- |
+| rule-files | A list of one or more files containing recording rules to be backfilled. All recording rules listed in the files will be backfilled. Alerting rules are not evaluated. | Yes |
+
+
diff --git a/docs/configuration/alerting_rules.md b/docs/configuration/alerting_rules.md
index 74f6c02b12..3c1ec84f0f 100644
--- a/docs/configuration/alerting_rules.md
+++ b/docs/configuration/alerting_rules.md
@@ -32,7 +32,11 @@ groups:
 ```
 
 The optional `for` clause causes Prometheus to wait for a certain duration
-between first encountering a new expression output vector element and counting an alert as firing for this element. In this case, Prometheus will check that the alert continues to be active during each evaluation for 10 minutes before firing the alert. Elements that are active, but not firing yet, are in the pending state.
+between first encountering a new expression output vector element and counting
+an alert as firing for this element. In this case, Prometheus will check that
+the alert continues to be active during each evaluation for 10 minutes before
+firing the alert. Elements that are active, but not firing yet, are in the pending state.
+Alerting rules without the `for` clause will become active on the first evaluation.
 
 The `labels` clause allows specifying a set of additional labels to be attached
 to the alert. Any existing conflicting labels will be overwritten. The label
diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md
index 49affc393d..30bb07a8c1 100644
--- a/docs/configuration/configuration.md
+++ b/docs/configuration/configuration.md
@@ -73,11 +73,49 @@ global:
   # Reloading the configuration will reopen the file.
   [ query_log_file: ]
 
+  # An uncompressed response body larger than this many bytes will cause the
+  # scrape to fail. 0 means no limit. Example: 100MB.
+  # This is an experimental feature, this behaviour could
+  # change or be removed in the future.
+  [ body_size_limit: | default = 0 ]
+
+  # Per-scrape limit on number of scraped samples that will be accepted.
+  # If more than this number of samples are present after metric relabeling
+  # the entire scrape will be treated as failed. 0 means no limit.
+  [ sample_limit: | default = 0 ]
+
+  # Per-scrape limit on number of labels that will be accepted for a sample. If
+  # more than this number of labels are present post metric-relabeling, the
+  # entire scrape will be treated as failed. 0 means no limit.
+  [ label_limit: | default = 0 ]
+
+  # Per-scrape limit on the length of label names that will be accepted for a sample.
+  # If a label name is longer than this number post metric-relabeling, the entire
+  # scrape will be treated as failed. 0 means no limit.
+  [ label_name_length_limit: | default = 0 ]
+
+  # Per-scrape limit on the length of label values that will be accepted for a sample.
+  # If a label value is longer than this number post metric-relabeling, the
+  # entire scrape will be treated as failed. 0 means no limit.
+  [ label_value_length_limit: | default = 0 ]
+
+  # Per-scrape config limit on number of unique targets that will be
+  # accepted.
If more than this number of targets are present after target + # relabeling, Prometheus will mark the targets as failed without scraping them. + # 0 means no limit. This is an experimental feature, this behaviour could + # change in the future. + [ target_limit: | default = 0 ] + # Rule files specifies a list of globs. Rules and alerts are read from # all matching files. rule_files: [ - ... ] +# Scrape config files specifies a list of globs. Scrape configs are read from +# all matching files and appended to the list of scrape configs. +scrape_config_files: + [ - ... ] + # A list of scrape configurations. scrape_configs: [ - ... ] @@ -129,6 +167,10 @@ job_name: # Per-scrape timeout when scraping this job. [ scrape_timeout: | default = ] +# Whether to scrape a classic histogram that is also exposed as a native +# histogram (has no effect without --enable-feature=native-histograms). +[ scrape_classic_histograms: | default = false ] + # The HTTP resource path on which to fetch metrics from targets. [ metrics_path: | default = /metrics ] @@ -200,7 +242,7 @@ oauth2: [ follow_redirects: | default = true ] # Whether to enable HTTP2. -[ enable_http2: | default: true ] +[ enable_http2: | default: true ] # Configures the scrape request's TLS settings. tls_config: @@ -208,6 +250,16 @@ tls_config: # Optional proxy URL. [ proxy_url: ] +# Comma-separated string that can contain IPs, CIDR notation, domain names +# that should be excluded from proxying. IP and domain names can +# contain port numbers. +[ no_proxy: ] +# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) +[ proxy_from_environment: | default: false ] +# Specifies headers to send to proxies during CONNECT requests. +[ proxy_connect_header: + [ : [, ...] ] ] + # List of Azure service discovery configurations. azure_sd_configs: @@ -294,6 +346,10 @@ nomad_sd_configs: openstack_sd_configs: [ - ... ] +# List of OVHcloud service discovery configurations. +ovhcloud_sd_configs: + [ - ... ] + # List of PuppetDB service discovery configurations. puppetdb_sd_configs: [ - ... ] @@ -331,6 +387,7 @@ metric_relabel_configs: # This is an experimental feature, this behaviour could # change or be removed in the future. [ body_size_limit: | default = 0 ] + # Per-scrape limit on number of scraped samples that will be accepted. # If more than this number of samples are present after metric relabeling # the entire scrape will be treated as failed. 0 means no limit. @@ -357,6 +414,11 @@ metric_relabel_configs: # 0 means no limit. This is an experimental feature, this behaviour could # change in the future. [ target_limit: | default = 0 ] + +# Limit on total number of positive and negative buckets allowed in a single +# native histogram. If this is exceeded, the entire scrape will be treated as +# failed. 0 means no limit. +[ native_histogram_bucket_limit: | default = 0 ] ``` Where `` must be unique across all scrape configurations. @@ -366,11 +428,16 @@ Where `` must be unique across all scrape configurations. A `tls_config` allows configuring TLS connections. ```yaml -# CA certificate to validate API server certificate with. +# CA certificate to validate API server certificate with. At most one of ca and ca_file is allowed. +[ ca: ] [ ca_file: ] -# Certificate and key files for client cert authentication to the server. +# Certificate and key for client cert authentication to the server. +# At most one of cert and cert_file is allowed. +# At most one of key and key_file is allowed. 
+[ cert: ] [ cert_file: ] +[ key: ] [ key_file: ] # ServerName extension to indicate the name of the server. @@ -385,6 +452,11 @@ A `tls_config` allows configuring TLS connections. # If unset, Prometheus will use Go default minimum version, which is TLS 1.2. # See MinVersion in https://pkg.go.dev/crypto/tls#Config. [ min_version: ] +# Maximum acceptable TLS version. Accepted values: TLS10 (TLS 1.0), TLS11 (TLS +# 1.1), TLS12 (TLS 1.2), TLS13 (TLS 1.3). +# If unset, Prometheus will use Go default maximum version, which is TLS 1.3. +# See MaxVersion in https://pkg.go.dev/crypto/tls#Config. +[ max_version: ] ``` ### `` @@ -418,6 +490,15 @@ tls_config: # Optional proxy URL. [ proxy_url: ] +# Comma-separated string that can contain IPs, CIDR notation, domain names +# that should be excluded from proxying. IP and domain names can +# contain port numbers. +[ no_proxy: ] +# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) +[ proxy_from_environment: | default: false ] +# Specifies headers to send to proxies during CONNECT requests. +[ proxy_connect_header: + [ : [, ...] ] ] ``` ### `` @@ -436,6 +517,7 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_azure_machine_resource_group`: the machine's resource group * `__meta_azure_machine_tag_`: each tag value of the machine * `__meta_azure_machine_scale_set`: the name of the scale set which the vm is part of (this value is only set if you are using a [scale set](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/)) +* `__meta_azure_machine_size`: the machine size * `__meta_azure_subscription_id`: the subscription ID * `__meta_azure_tenant_id`: the tenant ID @@ -496,12 +578,21 @@ oauth2: # Optional proxy URL. [ proxy_url: ] +# Comma-separated string that can contain IPs, CIDR notation, domain names +# that should be excluded from proxying. IP and domain names can +# contain port numbers. +[ no_proxy: ] +# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) +[ proxy_from_environment: | default: false ] +# Specifies headers to send to proxies during CONNECT requests. +[ proxy_connect_header: + [ : [, ...] ] ] # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] # Whether to enable HTTP2. -[ enable_http2: | default: true ] +[ enable_http2: | default: true ] # TLS configuration. tls_config: @@ -518,6 +609,7 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_consul_address`: the address of the target * `__meta_consul_dc`: the datacenter name for the target * `__meta_consul_health`: the health status of the service +* `__meta_consul_partition`: the admin partition name where the service is registered * `__meta_consul_metadata_`: each node metadata key value of the target * `__meta_consul_node`: the node name defined for the target * `__meta_consul_service_address`: the service address of the target @@ -532,10 +624,14 @@ The following meta labels are available on targets during [relabeling](#relabel_ # The information to access the Consul API. It is to be defined # as the Consul documentation requires. [ server: | default = "localhost:8500" ] +# Prefix for URIs for when consul is behind an API gateway (reverse proxy). +[ path_prefix: ] [ token: ] [ datacenter: ] # Namespaces are only supported in Consul Enterprise. 
[ namespace: ] +# Admin Partitions are only supported in Consul Enterprise. +[ partition: ] [ scheme: | default = "http" ] # The username and password fields are deprecated in favor of the basic_auth configuration. [ username: ] @@ -595,12 +691,21 @@ oauth2: # Optional proxy URL. [ proxy_url: ] +# Comma-separated string that can contain IPs, CIDR notation, domain names +# that should be excluded from proxying. IP and domain names can +# contain port numbers. +[ no_proxy: ] +# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) +[ proxy_from_environment: | default: false ] +# Specifies headers to send to proxies during CONNECT requests. +[ proxy_connect_header: + [ : [, ...] ] ] # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] # Whether to enable HTTP2. -[ enable_http2: | default: true ] +[ enable_http2: | default: true ] # TLS configuration. tls_config: @@ -673,12 +778,21 @@ oauth2: # Optional proxy URL. [ proxy_url: ] +# Comma-separated string that can contain IPs, CIDR notation, domain names +# that should be excluded from proxying. IP and domain names can +# contain port numbers. +[ no_proxy: ] +# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) +[ proxy_from_environment: | default: false ] +# Specifies headers to send to proxies during CONNECT requests. +[ proxy_connect_header: + [ : [, ...] ] ] # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] # Whether to enable HTTP2. -[ enable_http2: | default: true ] +[ enable_http2: | default: true ] # TLS configuration. tls_config: @@ -722,6 +836,15 @@ host: # Optional proxy URL. [ proxy_url: ] +# Comma-separated string that can contain IPs, CIDR notation, domain names +# that should be excluded from proxying. IP and domain names can +# contain port numbers. +[ no_proxy: ] +# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) +[ proxy_from_environment: | default: false ] +# Specifies headers to send to proxies during CONNECT requests. +[ proxy_connect_header: + [ : [, ...] ] ] # TLS configuration. tls_config: @@ -776,7 +899,7 @@ oauth2: [ follow_redirects: | default = true ] # Whether to enable HTTP2. -[ enable_http2: | default: true ] +[ enable_http2: | default: true ] ``` @@ -888,6 +1011,15 @@ host: # Optional proxy URL. [ proxy_url: ] +# Comma-separated string that can contain IPs, CIDR notation, domain names +# that should be excluded from proxying. IP and domain names can +# contain port numbers. +[ no_proxy: ] +# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) +[ proxy_from_environment: | default: false ] +# Specifies headers to send to proxies during CONNECT requests. +[ proxy_connect_header: + [ : [, ...] ] ] # TLS configuration. tls_config: @@ -944,7 +1076,7 @@ oauth2: [ follow_redirects: | default = true ] # Whether to enable HTTP2. -[ enable_http2: | default: true ] +[ enable_http2: | default: true ] ``` @@ -1057,6 +1189,54 @@ See below for the configuration options for EC2 discovery: filters: [ - name: values: , [...] ] + +# Authentication information used to authenticate to the EC2 API. +# Note that `basic_auth`, `authorization` and `oauth2` options are +# mutually exclusive. +# `password` and `password_file` are mutually exclusive. 
+
+# Optional HTTP basic authentication information, currently not supported by AWS.
+basic_auth:
+  [ username: ]
+  [ password: ]
+  [ password_file: ]
+
+# Optional `Authorization` header configuration, currently not supported by AWS.
+authorization:
+  # Sets the authentication type.
+  [ type: | default: Bearer ]
+  # Sets the credentials. It is mutually exclusive with
+  # `credentials_file`.
+  [ credentials: ]
+  # Sets the credentials to the credentials read from the configured file.
+  # It is mutually exclusive with `credentials`.
+  [ credentials_file: ]
+
+# Optional OAuth 2.0 configuration, currently not supported by AWS.
+oauth2:
+  [ ]
+
+# Optional proxy URL.
+[ proxy_url: ]
+# Comma-separated string that can contain IPs, CIDR notation, domain names
+# that should be excluded from proxying. IP and domain names can
+# contain port numbers.
+[ no_proxy: ]
+# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
+[ proxy_from_environment: | default: false ]
+# Specifies headers to send to proxies during CONNECT requests.
+[ proxy_connect_header:
+  [ : [, ...] ] ]
+
+# Configure whether HTTP requests follow HTTP 3xx redirects.
+[ follow_redirects: | default = true ]
+
+# Whether to enable HTTP2.
+[ enable_http2: | default: true ]
+
+# TLS configuration.
+tls_config:
+  [ ]
 ```
 
 The [relabeling phase](#relabel_config) is the preferred and more powerful
@@ -1096,6 +1276,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
 * `__meta_openstack_address_pool`: the pool of the private IP.
 * `__meta_openstack_instance_flavor`: the flavor of the OpenStack instance.
 * `__meta_openstack_instance_id`: the OpenStack instance ID.
+* `__meta_openstack_instance_image`: the ID of the image the OpenStack instance is using.
 * `__meta_openstack_instance_name`: the OpenStack instance name.
 * `__meta_openstack_instance_status`: the status of the OpenStack instance.
 * `__meta_openstack_private_ip`: the private IP of the OpenStack instance.
@@ -1173,6 +1354,66 @@ tls_config:
   [ ]
 ```
 
+### `<ovhcloud_sd_config>`
+
+OVHcloud SD configurations allow retrieving scrape targets from OVHcloud's [dedicated servers](https://www.ovhcloud.com/en/bare-metal/) and [VPS](https://www.ovhcloud.com/en/vps/) using
+their [API](https://api.ovh.com/).
+Prometheus will periodically check the REST endpoint and create a target for every discovered server.
+The role will try to use the public IPv4 address as the default address; if there is none, it will try to use the IPv6 one. This may be changed with relabeling, as sketched in the example below.
+For OVHcloud's [public cloud instances](https://www.ovhcloud.com/en/public-cloud/) you can use the [openstack_sd_config](#openstack_sd_config).
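As an editorial illustration (not part of the generated documentation), a scrape configuration using this mechanism could look as follows. The credentials are placeholders, and the relabel rule relies on the `__meta_ovhcloud_vps_ipv6` label listed below to prefer the IPv6 address over the IPv4 default:

```yaml
scrape_configs:
  - job_name: ovhcloud-vps
    ovhcloud_sd_configs:
      - application_key: PLACEHOLDER_KEY
        application_secret: PLACEHOLDER_SECRET
        consumer_key: PLACEHOLDER_CONSUMER
        service: vps
    relabel_configs:
      # Overwrite __address__ only when an IPv6 address was discovered;
      # the default replace action with regex (.+) is a no-op otherwise.
      - source_labels: [__meta_ovhcloud_vps_ipv6]
        regex: (.+)
        target_label: __address__
```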
+
+#### VPS
+
+* `__meta_ovhcloud_vps_cluster`: the cluster of the server
+* `__meta_ovhcloud_vps_datacenter`: the datacenter of the server
+* `__meta_ovhcloud_vps_disk`: the disk of the server
+* `__meta_ovhcloud_vps_display_name`: the display name of the server
+* `__meta_ovhcloud_vps_ipv4`: the IPv4 of the server
+* `__meta_ovhcloud_vps_ipv6`: the IPv6 of the server
+* `__meta_ovhcloud_vps_keymap`: the KVM keyboard layout of the server
+* `__meta_ovhcloud_vps_maximum_additional_ip`: the maximum additional IPs of the server
+* `__meta_ovhcloud_vps_memory_limit`: the memory limit of the server
+* `__meta_ovhcloud_vps_memory`: the memory of the server
+* `__meta_ovhcloud_vps_monitoring_ip_blocks`: the monitoring IP blocks of the server
+* `__meta_ovhcloud_vps_name`: the name of the server
+* `__meta_ovhcloud_vps_netboot_mode`: the netboot mode of the server
+* `__meta_ovhcloud_vps_offer_type`: the offer type of the server
+* `__meta_ovhcloud_vps_offer`: the offer of the server
+* `__meta_ovhcloud_vps_state`: the state of the server
+* `__meta_ovhcloud_vps_vcore`: the number of virtual cores of the server
+* `__meta_ovhcloud_vps_version`: the version of the server
+* `__meta_ovhcloud_vps_zone`: the zone of the server
+
+#### Dedicated servers
+
+* `__meta_ovhcloud_dedicated_server_commercial_range`: the commercial range of the server
+* `__meta_ovhcloud_dedicated_server_datacenter`: the datacenter of the server
+* `__meta_ovhcloud_dedicated_server_ipv4`: the IPv4 of the server
+* `__meta_ovhcloud_dedicated_server_ipv6`: the IPv6 of the server
+* `__meta_ovhcloud_dedicated_server_link_speed`: the link speed of the server
+* `__meta_ovhcloud_dedicated_server_name`: the name of the server
+* `__meta_ovhcloud_dedicated_server_os`: the operating system of the server
+* `__meta_ovhcloud_dedicated_server_rack`: the rack of the server
+* `__meta_ovhcloud_dedicated_server_reverse`: the reverse DNS name of the server
+* `__meta_ovhcloud_dedicated_server_server_id`: the ID of the server
+* `__meta_ovhcloud_dedicated_server_state`: the state of the server
+* `__meta_ovhcloud_dedicated_server_support_level`: the support level of the server
+
+See below for the configuration options for OVHcloud discovery:
+
+```yaml
+# Access key to use. https://api.ovh.com
+application_key: <string>
+application_secret: <secret>
+consumer_key: <secret>
+# Service of the targets to retrieve. Must be `vps` or `dedicated_server`.
+service: <string>
+# API endpoint. https://github.com/ovh/go-ovh#supported-apis
+[ endpoint: <string> | default = "ovh-eu" ]
+# Refresh interval to re-read the resources list.
+[ refresh_interval: <duration> | default = 60s ]
+```
+
 ### ``
 
 PuppetDB SD configurations allow retrieving scrape targets from
@@ -1253,12 +1494,21 @@ oauth2:
 # Optional proxy URL.
 [ proxy_url: ]
+# Comma-separated string that can contain IPs, CIDR notation, domain names
+# that should be excluded from proxying. IP and domain names can
+# contain port numbers.
+[ no_proxy: ]
+# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
+[ proxy_from_environment: | default: false ]
+# Specifies headers to send to proxies during CONNECT requests.
+[ proxy_connect_header:
+  [ : [, ...] ] ]
 
 # Configure whether HTTP requests follow HTTP 3xx redirects.
 [ follow_redirects: | default = true ]
 
 # Whether to enable HTTP2.
-[ enable_http2: | default: true ] +[ enable_http2: | default: true ] ``` See [this example Prometheus configuration file](/documentation/examples/prometheus-puppetdb.yml) @@ -1409,7 +1659,6 @@ The labels below are only available for targets with `role` set to `hcloud`: * `__meta_hetzner_hcloud_image_description`: the description of the server image * `__meta_hetzner_hcloud_image_os_flavor`: the OS flavor of the server image * `__meta_hetzner_hcloud_image_os_version`: the OS version of the server image -* `__meta_hetzner_hcloud_image_description`: the description of the server image * `__meta_hetzner_hcloud_datacenter_location`: the location of the server * `__meta_hetzner_hcloud_datacenter_location_network_zone`: the network zone of the server * `__meta_hetzner_hcloud_server_type`: the type of the server @@ -1462,12 +1711,21 @@ oauth2: # Optional proxy URL. [ proxy_url: ] +# Comma-separated string that can contain IPs, CIDR notation, domain names +# that should be excluded from proxying. IP and domain names can +# contain port numbers. +[ no_proxy: ] +# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) +[ proxy_from_environment: | default: false ] +# Specifies headers to send to proxies during CONNECT requests. +[ proxy_connect_header: + [ : [, ...] ] ] # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] # Whether to enable HTTP2. -[ enable_http2: | default: true ] +[ enable_http2: | default: true ] # TLS configuration. tls_config: @@ -1547,12 +1805,21 @@ oauth2: # Optional proxy URL. [ proxy_url: ] +# Comma-separated string that can contain IPs, CIDR notation, domain names +# that should be excluded from proxying. IP and domain names can +# contain port numbers. +[ no_proxy: ] +# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) +[ proxy_from_environment: | default: false ] +# Specifies headers to send to proxies during CONNECT requests. +[ proxy_connect_header: + [ : [, ...] ] ] # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] # Whether to enable HTTP2. -[ enable_http2: | default: true ] +[ enable_http2: | default: true ] # TLS configuration. tls_config: @@ -1621,12 +1888,21 @@ oauth2: # Optional proxy URL. [ proxy_url: ] +# Comma-separated string that can contain IPs, CIDR notation, domain names +# that should be excluded from proxying. IP and domain names can +# contain port numbers. +[ no_proxy: ] +# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) +[ proxy_from_environment: | default: false ] +# Specifies headers to send to proxies during CONNECT requests. +[ proxy_connect_header: + [ : [, ...] ] ] # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] # Whether to enable HTTP2. -[ enable_http2: | default: true ] +[ enable_http2: | default: true ] # TLS configuration. tls_config: @@ -1708,6 +1984,7 @@ Available meta labels: * `__meta_kubernetes_pod_annotationpresent_`: `true` for each annotation from the pod object. * `__meta_kubernetes_pod_container_init`: `true` if the container is an [InitContainer](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) * `__meta_kubernetes_pod_container_name`: Name of the container the target address points to. 
+* `__meta_kubernetes_pod_container_id`: ID of the container the target address points to. The ID is in the form `://`. * `__meta_kubernetes_pod_container_image`: The image the container is using. * `__meta_kubernetes_pod_container_port_name`: Name of the container port. * `__meta_kubernetes_pod_container_port_number`: Number of the container port. @@ -1761,6 +2038,8 @@ Available meta labels: * `__meta_kubernetes_endpointslice_address_target_name`: Name of referenced object. * `__meta_kubernetes_endpointslice_address_type`: The ip protocol family of the address of the target. * `__meta_kubernetes_endpointslice_endpoint_conditions_ready`: Set to `true` or `false` for the referenced endpoint's ready state. + * `__meta_kubernetes_endpointslice_endpoint_conditions_serving`: Set to `true` or `false` for the referenced endpoint's serving state. + * `__meta_kubernetes_endpointslice_endpoint_conditions_terminating`: Set to `true` or `false` for the referenced endpoint's terminating state. * `__meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname`: Name of the node hosting the referenced endpoint. * `__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname`: Flag that shows if the referenced object has a kubernetes.io/hostname annotation. * `__meta_kubernetes_endpointslice_port`: Port of the referenced endpoint. @@ -1834,12 +2113,21 @@ oauth2: # Optional proxy URL. [ proxy_url: ] +# Comma-separated string that can contain IPs, CIDR notation, domain names +# that should be excluded from proxying. IP and domain names can +# contain port numbers. +[ no_proxy: ] +# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) +[ proxy_from_environment: | default: false ] +# Specifies headers to send to proxies during CONNECT requests. +[ proxy_connect_header: + [ : [, ...] ] ] # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] # Whether to enable HTTP2. -[ enable_http2: | default: true ] +[ enable_http2: | default: true ] # TLS configuration. tls_config: @@ -1879,7 +2167,7 @@ attach_metadata: See [this example Prometheus configuration file](/documentation/examples/prometheus-kubernetes.yml) for a detailed example of configuring Prometheus for Kubernetes. -You may wish to check out the 3rd party [Prometheus Operator](https://github.com/coreos/prometheus-operator), +You may wish to check out the 3rd party [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator), which automates the Prometheus setup on top of Kubernetes. ### `` @@ -1911,6 +2199,15 @@ server: # Optional proxy URL. [ proxy_url: ] +# Comma-separated string that can contain IPs, CIDR notation, domain names +# that should be excluded from proxying. IP and domain names can +# contain port numbers. +[ no_proxy: ] +# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) +[ proxy_from_environment: | default: false ] +# Specifies headers to send to proxies during CONNECT requests. +[ proxy_connect_header: + [ : [, ...] ] ] # TLS configuration. tls_config: @@ -1947,7 +2244,7 @@ oauth2: [ follow_redirects: | default = true ] # Whether to enable HTTP2. -[ enable_http2: | default: true ] +[ enable_http2: | default: true ] ``` The [relabeling phase](#relabel_config) is the preferred and more powerful way @@ -2000,6 +2297,54 @@ See below for the configuration options for Lightsail discovery: # The port to scrape metrics from. 
If using the public IP address, this must
# instead be specified in the relabeling rule.
[ port: <int> | default = 80 ]
+
+# Authentication information used to authenticate to the Lightsail API.
+# Note that `basic_auth`, `authorization` and `oauth2` options are
+# mutually exclusive.
+# `password` and `password_file` are mutually exclusive.
+
+# Optional HTTP basic authentication information, currently not supported by AWS.
+basic_auth:
+  [ username: <string> ]
+  [ password: <secret> ]
+  [ password_file: <string> ]
+
+# Optional `Authorization` header configuration, currently not supported by AWS.
+authorization:
+  # Sets the authentication type.
+  [ type: <string> | default: Bearer ]
+  # Sets the credentials. It is mutually exclusive with
+  # `credentials_file`.
+  [ credentials: <secret> ]
+  # Sets the credentials to the credentials read from the configured file.
+  # It is mutually exclusive with `credentials`.
+  [ credentials_file: <filename> ]
+
+# Optional OAuth 2.0 configuration, currently not supported by AWS.
+oauth2:
+  [ <oauth2> ]
+
+# Optional proxy URL.
+[ proxy_url: <string> ]
+# Comma-separated string that can contain IPs, CIDR notation, and domain names
+# that should be excluded from proxying. IPs and domain names can
+# contain port numbers.
+[ no_proxy: <string> ]
+# Use the proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, and NO_PROXY, or their lowercase versions).
+[ proxy_from_environment: <boolean> | default: false ]
+# Specifies headers to send to proxies during CONNECT requests.
+[ proxy_connect_header:
+  [ <string>: [ <secret>, ... ] ] ]
+
+# Configure whether HTTP requests follow HTTP 3xx redirects.
+[ follow_redirects: <boolean> | default = true ]
+
+# Whether to enable HTTP2.
+[ enable_http2: <boolean> | default: true ]
+
+# TLS configuration.
+tls_config:
+  [ <tls_config> ]
```

### `<linode_sd_config>`
@@ -2062,12 +2407,21 @@ oauth2:

# Optional proxy URL.
[ proxy_url: <string> ]
+# Comma-separated string that can contain IPs, CIDR notation, and domain names
+# that should be excluded from proxying. IPs and domain names can
+# contain port numbers.
+[ no_proxy: <string> ]
+# Use the proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, and NO_PROXY, or their lowercase versions).
+[ proxy_from_environment: <boolean> | default: false ]
+# Specifies headers to send to proxies during CONNECT requests.
+[ proxy_connect_header:
+  [ <string>: [ <secret>, ... ] ] ]

# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]

# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

# TLS configuration.
tls_config:
@@ -2153,7 +2507,7 @@ oauth2:
[ follow_redirects: <boolean> | default = true ]

# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

# TLS configuration for connecting to marathon servers
tls_config:
@@ -2161,6 +2515,15 @@ tls_config:

# Optional proxy URL.
[ proxy_url: <string> ]
+# Comma-separated string that can contain IPs, CIDR notation, and domain names
+# that should be excluded from proxying. IPs and domain names can
+# contain port numbers.
+[ no_proxy: <string> ]
+# Use the proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, and NO_PROXY, or their lowercase versions).
+[ proxy_from_environment: <boolean> | default: false ]
+# Specifies headers to send to proxies during CONNECT requests.
+[ proxy_connect_header:
+  [ <string>: [ <secret>, ... ] ] ]
```

By default every app listed in Marathon will be scraped by Prometheus. If not all
@@ -2216,7 +2579,6 @@ The following meta labels are available on targets during [relabeling](#relabel_config):

# The information to access the Nomad API. It is to be defined
# as the Nomad documentation requires.
[ allow_stale: | default = true ] -[ datacenter: ] [ namespace: | default = default ] [ refresh_interval: | default = 60s ] [ region: | default = global ] @@ -2251,12 +2613,21 @@ oauth2: # Optional proxy URL. [ proxy_url: ] +# Comma-separated string that can contain IPs, CIDR notation, domain names +# that should be excluded from proxying. IP and domain names can +# contain port numbers. +[ no_proxy: ] +# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) +[ proxy_from_environment: | default: false ] +# Specifies headers to send to proxies during CONNECT requests. +[ proxy_connect_header: + [ : [, ...] ] ] # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] # Whether to enable HTTP2. -[ enable_http2: | default: true ] +[ enable_http2: | default: true ] # TLS configuration. tls_config: @@ -2266,7 +2637,7 @@ tls_config: ### `` Serverset SD configurations allow retrieving scrape targets from [Serversets] -(https://github.com/twitter/finagle/tree/master/finagle-serversets) which are +(https://github.com/twitter/finagle/tree/develop/finagle-serversets) which are stored in [Zookeeper](https://zookeeper.apache.org/). Serversets are commonly used by [Finagle](https://twitter.github.io/finagle/) and [Aurora](https://aurora.apache.org/). @@ -2428,12 +2799,21 @@ tls_config: # Optional proxy URL. [ proxy_url: ] +# Comma-separated string that can contain IPs, CIDR notation, domain names +# that should be excluded from proxying. IP and domain names can +# contain port numbers. +[ no_proxy: ] +# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) +[ proxy_from_environment: | default: false ] +# Specifies headers to send to proxies during CONNECT requests. +[ proxy_connect_header: + [ : [, ...] ] ] # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] # Whether to enable HTTP2. -[ enable_http2: | default: true ] +[ enable_http2: | default: true ] # Refresh interval to re-read the app instance list. [ refresh_interval: | default = 30s ] @@ -2540,10 +2920,19 @@ tags_filter: [ follow_redirects: | default = true ] # Whether to enable HTTP2. -[ enable_http2: | default: true ] +[ enable_http2: | default: true ] # Optional proxy URL. [ proxy_url: ] +# Comma-separated string that can contain IPs, CIDR notation, domain names +# that should be excluded from proxying. IP and domain names can +# contain port numbers. +[ no_proxy: ] +# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) +[ proxy_from_environment: | default: false ] +# Specifies headers to send to proxies during CONNECT requests. +[ proxy_connect_header: + [ : [, ...] ] ] # TLS configuration. tls_config: @@ -2611,12 +3000,21 @@ oauth2: # Optional proxy URL. [ proxy_url: ] +# Comma-separated string that can contain IPs, CIDR notation, domain names +# that should be excluded from proxying. IP and domain names can +# contain port numbers. +[ no_proxy: ] +# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) +[ proxy_from_environment: | default: false ] +# Specifies headers to send to proxies during CONNECT requests. +[ proxy_connect_header: + [ : [, ...] ] ] # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] # Whether to enable HTTP2. 
-[ enable_http2: | default: true ] +[ enable_http2: | default: true ] # TLS configuration. tls_config: @@ -2684,12 +3082,21 @@ oauth2: # Optional proxy URL. [ proxy_url: ] +# Comma-separated string that can contain IPs, CIDR notation, domain names +# that should be excluded from proxying. IP and domain names can +# contain port numbers. +[ no_proxy: ] +# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) +[ proxy_from_environment: | default: false ] +# Specifies headers to send to proxies during CONNECT requests. +[ proxy_connect_header: + [ : [, ...] ] ] # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] # Whether to enable HTTP2. -[ enable_http2: | default: true ] +[ enable_http2: | default: true ] # TLS configuration. tls_config: @@ -2790,6 +3197,8 @@ anchored on both ends. To un-anchor the regex, use `.*.*`. * `uppercase`: Maps the concatenated `source_labels` to their upper case. * `keep`: Drop targets for which `regex` does not match the concatenated `source_labels`. * `drop`: Drop targets for which `regex` matches the concatenated `source_labels`. +* `keepequal`: Drop targets for which the concatenated `source_labels` do not match `target_label`. +* `dropequal`: Drop targets for which the concatenated `source_labels` do match `target_label`. * `hashmod`: Set `target_label` to the `modulus` of a hash of the concatenated `source_labels`. * `labelmap`: Match `regex` against all source label names, not just those specified in `source_labels`. Then copy the values of the matching labels to label names given by `replacement` with match @@ -2875,12 +3284,21 @@ tls_config: # Optional proxy URL. [ proxy_url: ] +# Comma-separated string that can contain IPs, CIDR notation, domain names +# that should be excluded from proxying. IP and domain names can +# contain port numbers. +[ no_proxy: ] +# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy) +[ proxy_from_environment: | default: false ] +# Specifies headers to send to proxies during CONNECT requests. +[ proxy_connect_header: + [ : [, ...] ] ] # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] # Whether to enable HTTP2. -[ enable_http2: | default: true ] +[ enable_http2: | default: true ] # List of Azure service discovery configurations. azure_sd_configs: @@ -2962,6 +3380,10 @@ nomad_sd_configs: openstack_sd_configs: [ - ... ] +# List of OVHcloud service discovery configurations. +ovhcloud_sd_configs: + [ - ... ] + # List of PuppetDB service discovery configurations. puppetdb_sd_configs: [ - ... ] @@ -3028,6 +3450,9 @@ write_relabel_configs: # Enables sending of exemplars over remote write. Note that exemplar storage itself must be enabled for exemplars to be scraped in the first place. [ send_exemplars: | default = false ] +# Enables sending of native histograms, also known as sparse histograms, over remote write. +[ send_native_histograms: | default = false ] + # Sets the `Authorization` header on every remote write request with the # configured username and password. # password and password_file are mutually exclusive. @@ -3048,7 +3473,7 @@ authorization: [ credentials_file: ] # Optionally configures AWS's Signature Verification 4 signing process to -# sign requests. Cannot be set at the same time as basic_auth, authorization, or oauth2. +# sign requests. 
Cannot be set at the same time as basic_auth, authorization, or oauth2.
+# sign requests. Cannot be set at the same time as basic_auth, authorization, oauth2, or azuread.
# To use the default credentials from the AWS SDK, use `sigv4: {}`.
sigv4:
  # The AWS region. If blank, the region from the default credentials chain
@@ -3067,22 +3492,41 @@ sigv4:
  [ role_arn: <string> ]

# Optional OAuth 2.0 configuration.
-# Cannot be used at the same time as basic_auth, authorization, or sigv4.
+# Cannot be used at the same time as basic_auth, authorization, sigv4, or azuread.
oauth2:
  [ <oauth2> ]

+# Optional AzureAD configuration.
+# Cannot be used at the same time as basic_auth, authorization, oauth2, or sigv4.
+azuread:
+  # The Azure Cloud. Options are 'AzurePublic', 'AzureChina', or 'AzureGovernment'.
+  [ cloud: <string> | default = AzurePublic ]
+
+  # Azure User-assigned Managed identity.
+  [ managed_identity:
+    [ client_id: <string> ] ]
+
# Configures the remote write request's TLS settings.
tls_config:
  [ <tls_config> ]

# Optional proxy URL.
[ proxy_url: <string> ]
+# Comma-separated string that can contain IPs, CIDR notation, and domain names
+# that should be excluded from proxying. IPs and domain names can
+# contain port numbers.
+[ no_proxy: <string> ]
+# Use the proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, and NO_PROXY, or their lowercase versions).
+[ proxy_from_environment: <boolean> | default: false ]
+# Specifies headers to send to proxies during CONNECT requests.
+[ proxy_connect_header:
+  [ <string>: [ <secret>, ... ] ] ]

# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]

# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

# Configures the queue used to write to remote storage.
queue_config:
@@ -3181,12 +3625,21 @@ tls_config:

# Optional proxy URL.
[ proxy_url: <string> ]
+# Comma-separated string that can contain IPs, CIDR notation, and domain names
+# that should be excluded from proxying. IPs and domain names can
+# contain port numbers.
+[ no_proxy: <string> ]
+# Use the proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, and NO_PROXY, or their lowercase versions).
+[ proxy_from_environment: <boolean> | default: false ]
+# Specifies headers to send to proxies during CONNECT requests.
+[ proxy_connect_header:
+  [ <string>: [ <secret>, ... ] ] ]

# Configure whether HTTP requests follow HTTP 3xx redirects.
[ follow_redirects: <boolean> | default = true ]

# Whether to enable HTTP2.
-[ enable_http2: <bool> | default: true ]
+[ enable_http2: <boolean> | default: true ]

# Whether to use the external labels as selectors for the remote read endpoint.
[ filter_external_labels: <boolean> | default = true ]
diff --git a/docs/configuration/https.md b/docs/configuration/https.md
index 0513612f5c..bc83e07a38 100644
--- a/docs/configuration/https.md
+++ b/docs/configuration/https.md
@@ -44,6 +44,13 @@ tls_server_config:
  # CA certificate for client certificate authentication to the server.
  [ client_ca_file: <filename> ]

+  # Verify that the client certificate has a Subject Alternative Name (SAN)
+  # which is an exact match to an entry in this list, else terminate the
+  # connection. SAN match can be one or multiple of the following: DNS,
+  # IP, e-mail, or URI address from https://pkg.go.dev/crypto/x509#Certificate.
+  [ client_allowed_sans:
+    [ - <string> ] ]
+
  # Minimum TLS version that is acceptable.
  [ min_version: <string> | default = "TLS12" ]

@@ -64,7 +71,7 @@ tls_server_config:
  # client's most preferred ciphersuite, or the server's most preferred
  # ciphersuite. If true then the server's preference, as expressed in
  # the order of elements in cipher_suites, is used.
- [ prefer_server_cipher_suites: <bool> | default = true ]
+ [ prefer_server_cipher_suites: <boolean> | default = true ]

  # Elliptic curves that will be used in an ECDHE handshake, in preference
  # order. Available curves are documented in the go documentation:
diff --git a/docs/configuration/recording_rules.md b/docs/configuration/recording_rules.md
index a14aab09d8..eda0214b35 100644
--- a/docs/configuration/recording_rules.md
+++ b/docs/configuration/recording_rules.md
@@ -17,6 +17,10 @@ Rule files use YAML. The rule files can be reloaded at runtime by sending
`SIGHUP` to the Prometheus process. The changes are only applied if all rule
files are well-formatted.

+_Note about native histograms (experimental feature): Native histograms are always
+recorded as gauge histograms (for now). Most cases will create gauge histograms
+naturally, e.g. after `rate()`._
+
## Syntax-checking rules

To quickly check whether a rule file is syntactically correct without starting
@@ -66,8 +70,8 @@ A simple example rules file would be:
groups:
  - name: example
    rules:
-    - record: job:http_inprogress_requests:sum
-      expr: sum by (job) (http_inprogress_requests)
+    - record: code:prometheus_http_requests_total:sum
+      expr: sum by (code) (prometheus_http_requests_total)
```

### `<rule_group>`
@@ -119,6 +123,10 @@ expr: <string>
# Alerts which have not yet fired for long enough are considered pending.
[ for: <duration> | default = 0s ]

+# How long an alert will continue firing after the condition that triggered it
+# has cleared.
+[ keep_firing_for: <duration> | default = 0s ]
+
# Labels to add or overwrite for each alert.
labels:
  [ <labelname>: <tmpl_string> ]
@@ -128,6 +136,9 @@ annotations:
  [ <labelname>: <tmpl_string> ]
```

+See also the
+[best practices for naming metrics created by recording rules](https://prometheus.io/docs/practices/rules/#recording-rules).
+
# Limiting alerts and series

A limit for alerts produced by alerting rules and series produced by recording rules
diff --git a/docs/configuration/unit_testing_rules.md b/docs/configuration/unit_testing_rules.md
index d99255f01f..efd168b358 100644
--- a/docs/configuration/unit_testing_rules.md
+++ b/docs/configuration/unit_testing_rules.md
@@ -77,14 +77,17 @@ series: <string>

# This uses expanding notation.
# Expanding notation:
# 'a+bxc' becomes 'a a+b a+(2*b) a+(3*b) … a+(c*b)'
+# Read this as series starts at a, then c further samples incrementing by b.
# 'a-bxc' becomes 'a a-b a-(2*b) a-(3*b) … a-(c*b)'
+# Read this as series starts at a, then c further samples decrementing by b (or incrementing by negative b).
# There are special values to indicate missing and stale samples:
# '_' represents a missing sample from scrape
# 'stale' indicates a stale sample
# Examples:
-# 1. '-2+4x3' becomes '-2 2 6 10'
-# 2. ' 1-2x4' becomes '1 -1 -3 -5 -7'
-# 3. ' 1 _x3 stale' becomes '1 _ _ _ stale'
+# 1. '-2+4x3' becomes '-2 2 6 10' - series starts at -2, then 3 further samples incrementing by 4.
+# 2. ' 1-2x4' becomes '1 -1 -3 -5 -7' - series starts at 1, then 4 further samples decrementing by 2.
+# 3. ' 1x4' becomes '1 1 1 1 1' - shorthand for '1+0x4', series starts at 1, then 4 further samples incrementing by 0.
+# 4. ' 1 _x3 stale' becomes '1 _ _ _ stale' - the missing sample cannot increment, so 3 missing samples are produced by the '_x3' expression.
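+# 5. ' 4-2x3' becomes '4 2 0 -2' - a further worked instance of the 'a-bxc' rule above: series starts at 4, then 3 further samples decrementing by 2.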
values: <string>
```

diff --git a/docs/feature_flags.md b/docs/feature_flags.md
index ddf10798d8..58e49e3b4b 100644
--- a/docs/feature_flags.md
+++ b/docs/feature_flags.md
@@ -1,6 +1,6 @@
---
title: Feature flags
-sort_rank: 11
+sort_rank: 12
---

# Feature flags
@@ -103,3 +103,26 @@ When enabled, the default ports for HTTP (`:80`) or HTTPS (`:443`) will _not_ be
the address used to scrape a target (the value of the `__address__` label), contrary
to the default behavior. In addition, if a default HTTP or HTTPS port has already
been added either in a static configuration or by a service discovery mechanism
and the respective scheme is specified (`http` or `https`), that port will be removed.
+
+## Native Histograms
+
+`--enable-feature=native-histograms`
+
+When enabled, Prometheus will ingest native histograms (formerly also known as
+sparse histograms or high-res histograms). Native histograms are still highly
+experimental. Expect breaking changes to happen (including those rendering the
+TSDB unreadable).
+
+Native histograms are currently only supported in the traditional Prometheus
+protobuf exposition format. This feature flag therefore also enables a new (and
+also experimental) protobuf parser, through which _all_ metrics are ingested
+(i.e. not only native histograms). Prometheus will try to negotiate the
+protobuf format first. The instrumented target needs to support the protobuf
+format, too, _and_ it needs to expose native histograms. The protobuf format
+allows exposing conventional and native histograms side by side. With this
+feature flag disabled, Prometheus will continue to parse the conventional
+histogram (albeit via the text format). With this flag enabled, Prometheus will
+still ingest those conventional histograms that do not come with a
+corresponding native histogram. However, if a native histogram is present,
+Prometheus will ignore the corresponding conventional histogram, with the
+notable exception of exemplars, which are always ingested.
diff --git a/docs/federation.md b/docs/federation.md
index 1b4f04d73d..3344a0ed05 100644
--- a/docs/federation.md
+++ b/docs/federation.md
@@ -8,6 +8,16 @@ sort_rank: 6
Federation allows a Prometheus server to scrape selected time series from
another Prometheus server.

+_Note about native histograms (experimental feature): To scrape native histograms
+via federation, the scraping Prometheus server needs to run with native histograms
+enabled (via the command line flag `--enable-feature=native-histograms`), implying
+that the protobuf format is used for scraping. Should the federated metrics contain
+a mix of different sample types (float64, counter histogram, gauge histogram) for
+the same metric name, the federation payload will contain multiple metric families
+with the same name (but different types). Technically, this violates the rules of
+the protobuf exposition format, but Prometheus is nevertheless able to ingest all
+metrics correctly._
+
## Use cases

There are different use cases for federation. Commonly, it is used to either
diff --git a/docs/getting_started.md b/docs/getting_started.md
index 028b1c86f0..e89ac705ee 100644
--- a/docs/getting_started.md
+++ b/docs/getting_started.md
@@ -250,3 +250,18 @@ scrape_configs:

Restart Prometheus with the new configuration and verify that a new time series
with the metric name `job_instance_mode:node_cpu_seconds:avg_rate5m` is now
available by querying it through the expression browser or graphing it.
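+
+For example, one quick way to check from the command line that the new series
+exists (a sketch, assuming Prometheus is listening on the default
+`localhost:9090`) is to query it via the HTTP API:
+
+```
+curl 'http://localhost:9090/api/v1/query?query=job_instance_mode:node_cpu_seconds:avg_rate5m'
+```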
+
+## Reloading configuration
+
+As mentioned in the [configuration documentation](configuration/configuration.md), a
+Prometheus instance can have its configuration reloaded without restarting the
+process by using the `SIGHUP` signal. If you're running on Linux, this can be
+performed by using `kill -s SIGHUP <PID>`, replacing `<PID>` with your Prometheus
+process ID.
+
+## Shutting down your instance gracefully
+
+While Prometheus does have recovery mechanisms in the case of an abrupt process
+failure, it is recommended to use the `SIGTERM` signal to cleanly shut down a
+Prometheus instance. If you're running on Linux, this can be performed by using
+`kill -s SIGTERM <PID>`, replacing `<PID>` with your Prometheus process ID.
diff --git a/docs/installation.md b/docs/installation.md
index 592d67b28e..05df14a46e 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -87,7 +87,7 @@ the following third-party contributions:

### Ansible

-* [Cloud Alchemy/ansible-prometheus](https://github.com/cloudalchemy/ansible-prometheus)
+* [prometheus-community/ansible](https://github.com/prometheus-community/ansible)

### Chef
diff --git a/docs/migration.md b/docs/migration.md
index 49e9aee8b9..cb88bbfd6f 100644
--- a/docs/migration.md
+++ b/docs/migration.md
@@ -1,6 +1,6 @@
---
title: Migration
-sort_rank: 9
+sort_rank: 10
---

# Prometheus 2.0 migration guide
diff --git a/docs/querying/api.md b/docs/querying/api.md
index 8c3eb9d244..ca7f64f622 100644
--- a/docs/querying/api.md
+++ b/docs/querying/api.md
@@ -407,7 +407,7 @@ $ curl -g 'http://localhost:9090/api/v1/query_exemplars?query=test_exemplar_metr
          "traceID": "EpTxMJ40fUus7aGY"
        },
        "value": "6",
-        "timestamp": 1600096945.479,
+        "timestamp": 1600096945.479
      }
    ]
  },
@@ -424,15 +424,15 @@ $ curl -g 'http://localhost:9090/api/v1/query_exemplars?query=test_exemplar_metr
          "traceID": "Olp9XHlq763ccsfa"
        },
        "value": "19",
-        "timestamp": 1600096955.479,
+        "timestamp": 1600096955.479
      },
      {
        "labels": {
          "traceID": "hCtjygkIHwAN9vs4"
        },
        "value": "20",
-        "timestamp": 1600096965.489,
-      },
+        "timestamp": 1600096965.489
+      }
    ]
  }
]
@@ -447,6 +447,10 @@ sample values. JSON does not support special float values such as `NaN`, `Inf`,
and `-Inf`, so sample values are transferred as quoted JSON strings rather than
raw numbers.

+The keys `"histogram"` and `"histograms"` only show up if the experimental
+native histograms are present in the response. Their placeholder `<histogram>`
+is explained in detail in its own section below.
+
### Range vectors

Range vectors are returned as result type `matrix`. The corresponding
@@ -456,12 +460,16 @@ Range vectors are returned as result type `matrix`. The corresponding
[
  {
    "metric": { "<label_name>": "<label_value>", ... },
-    "values": [ [ <unix_time>, "<sample_value>" ], ... ]
+    "values": [ [ <unix_time>, "<sample_value>" ], ... ],
+    "histograms": [ [ <unix_time>, <histogram> ], ... ]
  },
  ...
]
```

+Each series could have the `"values"` key, or the `"histograms"` key, or both.
+For a given timestamp, there will only be one sample of either float or histogram type.
+
### Instant vectors

Instant vectors are returned as result type `vector`. The corresponding
@@ -471,12 +479,15 @@ Instant vectors are returned as result type `vector`. The corresponding
[
  {
    "metric": { "<label_name>": "<label_value>", ... },
-    "value": [ <unix_time>, "<sample_value>" ]
+    "value": [ <unix_time>, "<sample_value>" ],
+    "histogram": [ <unix_time>, <histogram> ]
  },
  ...
]
```

+Each series could have the `"value"` key, or the `"histogram"` key, but not both.
+
### Scalars

Scalar results are returned as result type `scalar`. The corresponding
@@ -495,6 +506,33 @@ String results are returned as result type `string`.
The corresponding
[ <unix_time>, "<string_value>" ]
```

+### Native histograms
+
+The `<histogram>` placeholder used above is formatted as follows.
+
+_Note that native histograms are an experimental feature, and the format below
+might still change._
+
+```
+{
+  "count": "<count_of_observations>",
+  "sum": "<sum_of_observations>",
+  "buckets": [ [ <boundary_rule>, "<left_boundary>", "<right_boundary>", "<count_in_bucket>" ], ... ]
+}
+```
+
+The `<boundary_rule>` placeholder is an integer between 0 and 3 with the
+following meaning:
+
+* 0: “open left” (left boundary is exclusive, right boundary is inclusive)
+* 1: “open right” (left boundary is inclusive, right boundary is exclusive)
+* 2: “open both” (both boundaries are exclusive)
+* 3: “closed both” (both boundaries are inclusive)
+
+Note that with the currently implemented bucket schemas, positive buckets are
+“open left”, negative buckets are “open right”, and the zero bucket (with a
+negative left boundary and a positive right boundary) is “closed both”.
+
## Targets

The following endpoint returns an overview of the current state of the
@@ -588,6 +626,38 @@ $ curl 'http://localhost:9090/api/v1/targets?state=active'
}
```

+The `scrapePool` query parameter allows the caller to filter by scrape pool name.
+
+```json
+$ curl 'http://localhost:9090/api/v1/targets?scrapePool=node_exporter'
+{
+  "status": "success",
+  "data": {
+    "activeTargets": [
+      {
+        "discoveredLabels": {
+          "__address__": "127.0.0.1:9091",
+          "__metrics_path__": "/metrics",
+          "__scheme__": "http",
+          "job": "node_exporter"
+        },
+        "labels": {
+          "instance": "127.0.0.1:9091",
+          "job": "node_exporter"
+        },
+        "scrapePool": "node_exporter",
+        "scrapeUrl": "http://127.0.0.1:9091/metrics",
+        "globalUrl": "http://example-prometheus:9091/metrics",
+        "lastError": "",
+        "lastScrape": "2017-01-17T15:07:44.723715405+01:00",
+        "lastScrapeDuration": 50688943,
+        "health": "up"
+      }
+    ],
+    "droppedTargets": []
+  }
+}
+```

## Rules

@@ -603,7 +673,11 @@ GET /api/v1/rules
```

URL query parameters:
+
- `type=alert|record`: return only the alerting rules (e.g. `type=alert`) or the recording rules (e.g. `type=record`). When the parameter is absent or empty, no filtering is done.
+- `rule_name[]=<string>`: only return rules with the given rule name. If the parameter is repeated, rules with any of the provided names are returned. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done.
+- `rule_group[]=<string>`: only return rules with the given rule group name. If the parameter is repeated, rules with any of the provided rule group names are returned. When the parameter is absent or empty, no filtering is done.
+- `file[]=<string>`: only return rules with the given filepath. If the parameter is repeated, rules with any of the provided filepaths are returned. When the parameter is absent or empty, no filtering is done.

```json
$ curl http://localhost:9090/api/v1/rules
@@ -789,6 +863,7 @@ GET /api/v1/metadata
URL query parameters:

- `limit=<number>`: Maximum number of metrics to return.
+- `limit_per_metric=<number>`: Maximum number of metadata to return per metric.
- `metric=<string>`: A metric name to filter metadata for. All metric metadata is retrieved if left empty.

The `data` section of the query result consists of an object where each key is a metric name and each value is a list of unique metadata objects, as exposed for that metric name across all targets.

@@ -824,6 +899,32 @@ curl -G http://localhost:9090/api/v1/metadata?limit=2
}
```

+The following example returns only one metadata entry for each metric.
+ +```json +curl -G http://localhost:9090/api/v1/metadata?limit_per_metric=1 + +{ + "status": "success", + "data": { + "cortex_ring_tokens": [ + { + "type": "gauge", + "help": "Number of tokens in the ring", + "unit": "" + } + ], + "http_requests_total": [ + { + "type": "counter", + "help": "Number of HTTP requests", + "unit": "" + } + ] + } +} +``` + The following example returns metadata only for the metric `http_requests_total`. ```json @@ -1000,6 +1101,10 @@ The following endpoint returns various cardinality statistics about the Promethe ``` GET /api/v1/status/tsdb ``` +URL query parameters: +- `limit=`: Limit the number of returned items to a given number for each set of statistics. By default, 10 items are returned. + +The `data` section of the query result consists of - **headStats**: This provides the following data about the head block of the TSDB: - **numSeries**: The number of series. - **chunkCount**: The number of chunks. diff --git a/docs/querying/basics.md b/docs/querying/basics.md index 108288f664..9eb95c66eb 100644 --- a/docs/querying/basics.md +++ b/docs/querying/basics.md @@ -32,6 +32,16 @@ expression), only some of these types are legal as the result from a user-specified expression. For example, an expression that returns an instant vector is the only type that can be directly graphed. +_Notes about the experimental native histograms:_ + +* Ingesting native histograms has to be enabled via a [feature + flag](../feature_flags/#native-histograms). +* Once native histograms have been ingested into the TSDB (and even after + disabling the feature flag again), both instant vectors and range vectors may + now contain samples that aren't simple floating point numbers (float samples) + but complete histograms (histogram samples). A vector may contain a mix of + float samples and histogram samples. + ## Literals ### String literals @@ -147,9 +157,11 @@ syntax](https://github.com/google/re2/wiki/Syntax). Range vector literals work like instant vector literals, except that they select a range of samples back from the current instant. Syntactically, a [time -duration](#time-durations) is appended in square brackets (`[]`) at the end of a -vector selector to specify how far back in time values should be fetched for -each resulting range vector element. +duration](#time-durations) is appended in square brackets (`[]`) at the end of +a vector selector to specify how far back in time values should be fetched for +each resulting range vector element. The range is a closed interval, +i.e. samples with timestamps coinciding with either boundary of the range are +still included in the selection. In this example, we select all the values we have recorded within the last 5 minutes for all time series that have the metric name `http_requests_total` and diff --git a/docs/querying/functions.md b/docs/querying/functions.md index 5f0774136b..55ed92ecc8 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -11,6 +11,20 @@ instant-vector)`. This means that there is one argument `v` which is an instant vector, which if not provided it will default to the value of the expression `vector(time())`. +_Notes about the experimental native histograms:_ + +* Ingesting native histograms has to be enabled via a [feature + flag](../feature_flags/#native-histograms). As long as no native histograms + have been ingested into the TSDB, all functions will behave as usual. 
+* Functions that do not explicitly mention native histograms in their
+  documentation (see below) will ignore histogram samples.
+* Functions that do already act on native histograms might still change their
+  behavior in the future.
+* If a function requires the same bucket layout between multiple native
+  histograms it acts on, it will automatically convert them
+  appropriately. (With the currently supported bucket schemas, that's always
+  possible.)
+
## `abs()`

`abs(v instant-vector)` returns the input vector with all sample values converted to
@@ -19,8 +33,8 @@ their absolute value.
## `absent()`

`absent(v instant-vector)` returns an empty vector if the vector passed to it
-has any elements and a 1-element vector with the value 1 if the vector passed to
-it has no elements.
+has any elements (floats or native histograms) and a 1-element vector with the
+value 1 if the vector passed to it has no elements.

This is useful for alerting on when no time series exist for a given metric
name and label combination.
@@ -42,8 +56,8 @@ of the 1-element output vector from the input vector.
## `absent_over_time()`

`absent_over_time(v range-vector)` returns an empty vector if the range vector
-passed to it has any elements and a 1-element vector with the value 1 if the
-range vector passed to it has no elements.
+passed to it has any elements (floats or native histograms) and a 1-element
+vector with the value 1 if the range vector passed to it has no elements.

This is useful for alerting on when no time series exist for a given metric
name and label combination for a certain amount of time.
@@ -130,7 +144,14 @@ between now and 2 hours ago:
delta(cpu_temp_celsius{host="zeus"}[2h])
```

-`delta` should only be used with gauges.
+`delta` acts on native histograms by calculating a new histogram where each
+component (sum and count of observations, buckets) is the difference between
+the respective component in the first and last native histogram in
+`v`. However, each element in `v` that contains a mix of float and native
+histogram samples within the range will be missing from the result vector.
+
+`delta` should only be used with gauges and native histograms where the
+components behave like gauges (so-called gauge histograms).

## `deriv()`

@@ -154,53 +175,154 @@ Special cases are:

`floor(v instant-vector)` rounds the sample values of all elements in `v` down
to the nearest integer.

+## `histogram_count()` and `histogram_sum()`
+
+_Both functions only act on native histograms, which are an experimental
+feature. The behavior of these functions may change in future versions of
+Prometheus, including their removal from PromQL._
+
+`histogram_count(v instant-vector)` returns the count of observations stored in
+a native histogram. Samples that are not native histograms are ignored and do
+not show up in the returned vector.
+
+Similarly, `histogram_sum(v instant-vector)` returns the sum of observations
+stored in a native histogram.
+ +Use `histogram_count` in the following way to calculate a rate of observations +(in this case corresponding to “requests per second”) from a native histogram: + + histogram_count(rate(http_request_duration_seconds[10m])) + +The additional use of `histogram_sum` enables the calculation of the average of +observed values (in this case corresponding to “average request duration”): + + histogram_sum(rate(http_request_duration_seconds[10m])) + / + histogram_count(rate(http_request_duration_seconds[10m])) + +## `histogram_fraction()` + +_This function only acts on native histograms, which are an experimental +feature. The behavior of this function may change in future versions of +Prometheus, including its removal from PromQL._ + +For a native histogram, `histogram_fraction(lower scalar, upper scalar, v +instant-vector)` returns the estimated fraction of observations between the +provided lower and upper values. Samples that are not native histograms are +ignored and do not show up in the returned vector. + +For example, the following expression calculates the fraction of HTTP requests +over the last hour that took 200ms or less: + + histogram_fraction(0, 0.2, rate(http_request_duration_seconds[1h])) + +The error of the estimation depends on the resolution of the underlying native +histogram and how closely the provided boundaries are aligned with the bucket +boundaries in the histogram. + +`+Inf` and `-Inf` are valid boundary values. For example, if the histogram in +the expression above included negative observations (which shouldn't be the +case for request durations), the appropriate lower boundary to include all +observations less than or equal 0.2 would be `-Inf` rather than `0`. + +Whether the provided boundaries are inclusive or exclusive is only relevant if +the provided boundaries are precisely aligned with bucket boundaries in the +underlying native histogram. In this case, the behavior depends on the schema +definition of the histogram. The currently supported schemas all feature +inclusive upper boundaries and exclusive lower boundaries for positive values +(and vice versa for negative values). Without a precise alignment of +boundaries, the function uses linear interpolation to estimate the +fraction. With the resulting uncertainty, it becomes irrelevant if the +boundaries are inclusive or exclusive. + ## `histogram_quantile()` -`histogram_quantile(φ scalar, b instant-vector)` calculates the φ-quantile (0 ≤ φ -≤ 1) from the buckets `b` of a -[histogram](https://prometheus.io/docs/concepts/metric_types/#histogram). (See -[histograms and summaries](https://prometheus.io/docs/practices/histograms) for -a detailed explanation of φ-quantiles and the usage of the histogram metric type -in general.) The samples in `b` are the counts of observations in each bucket. -Each sample must have a label `le` where the label value denotes the inclusive -upper bound of the bucket. (Samples without such a label are silently ignored.) -The [histogram metric type](https://prometheus.io/docs/concepts/metric_types/#histogram) -automatically provides time series with the `_bucket` suffix and the appropriate -labels. +`histogram_quantile(φ scalar, b instant-vector)` calculates the φ-quantile (0 ≤ +φ ≤ 1) from a [conventional +histogram](https://prometheus.io/docs/concepts/metric_types/#histogram) or from +a native histogram. 
(See [histograms and +summaries](https://prometheus.io/docs/practices/histograms) for a detailed +explanation of φ-quantiles and the usage of the (conventional) histogram metric +type in general.) + +_Note that native histograms are an experimental feature. The behavior of this +function when dealing with native histograms may change in future versions of +Prometheus._ + +The conventional float samples in `b` are considered the counts of observations +in each bucket of one or more conventional histograms. Each float sample must +have a label `le` where the label value denotes the inclusive upper bound of +the bucket. (Float samples without such a label are silently ignored.) The +other labels and the metric name are used to identify the buckets belonging to +each conventional histogram. The [histogram metric +type](https://prometheus.io/docs/concepts/metric_types/#histogram) +automatically provides time series with the `_bucket` suffix and the +appropriate labels. + +The native histogram samples in `b` are treated each individually as a separate +histogram to calculate the quantile from. + +As long as no naming collisions arise, `b` may contain a mix of conventional +and native histograms. Use the `rate()` function to specify the time window for the quantile calculation. -Example: A histogram metric is called `http_request_duration_seconds`. To -calculate the 90th percentile of request durations over the last 10m, use the -following expression: +Example: A histogram metric is called `http_request_duration_seconds` (and +therefore the metric name for the buckets of a conventional histogram is +`http_request_duration_seconds_bucket`). To calculate the 90th percentile of request +durations over the last 10m, use the following expression in case +`http_request_duration_seconds` is a conventional histogram: histogram_quantile(0.9, rate(http_request_duration_seconds_bucket[10m])) +For a native histogram, use the following expression instead: + + histogram_quantile(0.9, rate(http_request_duration_seconds[10m])) + The quantile is calculated for each label combination in `http_request_duration_seconds`. To aggregate, use the `sum()` aggregator around the `rate()` function. Since the `le` label is required by -`histogram_quantile()`, it has to be included in the `by` clause. The following -expression aggregates the 90th percentile by `job`: +`histogram_quantile()` to deal with conventional histograms, it has to be +included in the `by` clause. The following expression aggregates the 90th +percentile by `job` for conventional histograms: histogram_quantile(0.9, sum by (job, le) (rate(http_request_duration_seconds_bucket[10m]))) + +When aggregating native histograms, the expression simplifies to: -To aggregate everything, specify only the `le` label: + histogram_quantile(0.9, sum by (job) (rate(http_request_duration_seconds[10m]))) + +To aggregate all conventional histograms, specify only the `le` label: histogram_quantile(0.9, sum by (le) (rate(http_request_duration_seconds_bucket[10m]))) -The `histogram_quantile()` function interpolates quantile values by -assuming a linear distribution within a bucket. The highest bucket -must have an upper bound of `+Inf`. (Otherwise, `NaN` is returned.) If -a quantile is located in the highest bucket, the upper bound of the -second highest bucket is returned. A lower limit of the lowest bucket -is assumed to be 0 if the upper bound of that bucket is greater than -0. In that case, the usual linear interpolation is applied within that -bucket. 
Otherwise, the upper bound of the lowest bucket is returned
-for quantiles located in the lowest bucket.
+With native histograms, aggregating everything works as usual without any `by` clause:
+
+    histogram_quantile(0.9, sum(rate(http_request_duration_seconds[10m])))
+
+The `histogram_quantile()` function interpolates quantile values by
+assuming a linear distribution within a bucket.
+
+If `b` has 0 observations, `NaN` is returned. For φ < 0, `-Inf` is
+returned. For φ > 1, `+Inf` is returned. For φ = `NaN`, `NaN` is returned.
+
+The following is only relevant for conventional histograms: If `b` contains
+fewer than two buckets, `NaN` is returned. The highest bucket must have an
+upper bound of `+Inf`. (Otherwise, `NaN` is returned.) If a quantile is located
+in the highest bucket, the upper bound of the second highest bucket is
+returned. A lower limit of the lowest bucket is assumed to be 0 if the upper
+bound of that bucket is greater than
+0. In that case, the usual linear interpolation is applied within that
+bucket. Otherwise, the upper bound of the lowest bucket is returned for
+quantiles located in the lowest bucket.
+
+You can use `histogram_quantile(0, v instant-vector)` to get the estimated minimum value stored in
+a histogram.
+
+You can use `histogram_quantile(1, v instant-vector)` to get the estimated maximum value stored in
+a histogram.

-If `b` has 0 observations, `NaN` is returned. If `b` contains fewer than two buckets,
-`NaN` is returned. For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned. For φ = `NaN`, `NaN` is returned.

## `holt_winters()`

@@ -242,11 +364,17 @@ over the last 5 minutes, per time series in the range vector:
increase(http_requests_total{job="api-server"}[5m])
```

-`increase` should only be used with counters. It is syntactic sugar
-for `rate(v)` multiplied by the number of seconds under the specified
-time range window, and should be used primarily for human readability.
-Use `rate` in recording rules so that increases are tracked consistently
-on a per-second basis.
+`increase` acts on native histograms by calculating a new histogram where each
+component (sum and count of observations, buckets) is the increase between
+the respective component in the first and last native histogram in
+`v`. However, each element in `v` that contains a mix of float and native
+histogram samples within the range will be missing from the result vector.
+
+`increase` should only be used with counters and native histograms where the
+components behave like counters. It is syntactic sugar for `rate(v)` multiplied
+by the number of seconds under the specified time range window, and should be
+used primarily for human readability. Use `rate` in recording rules so that
+increases are tracked consistently on a per-second basis.

## `irate()`

@@ -280,6 +408,8 @@ For each timeseries in `v`, `label_join(v instant-vector, dst_label string, separator string, src_label_1 string, src_label_2 string, ...)` joins all the values of all the `src_labels`
using `separator` and returns the timeseries with the label `dst_label` containing the joined value.
There can be any number of `src_labels` in this function.

+`label_join` acts on float and histogram samples in the same way.
+
This example will return a vector with each time series having a `foo` label with the value `a,b,c` added to it:

```
@@ -295,6 +425,8 @@ of `replacement`, together with the original labels in the input. Capturing groups in the
regular expression can be referenced with `$1`, `$2`, etc.
If the regular expression doesn't match then the timeseries is returned unchanged.
+`label_replace` acts on float and histogram samples in the same way.
+
This example will return timeseries with the values `a:c` at label `service` and `a` at label `foo`:

```
@@ -358,8 +490,15 @@ over the last 5 minutes, per time series in the range vector:
rate(http_requests_total{job="api-server"}[5m])
```

-`rate` should only be used with counters. It is best suited for alerting,
-and for graphing of slow-moving counters.
+`rate` acts on native histograms by calculating a new histogram where each
+component (sum and count of observations, buckets) is the rate of increase
+between the respective component in the first and last native histogram in
+`v`. However, each element in `v` that contains a mix of float and native
+histogram samples within the range will be missing from the result vector.
+
+`rate` should only be used with counters and native histograms where the
+components behave like counters. It is best suited for alerting, and for
+graphing of slow-moving counters.

Note that when combining `rate()` with an aggregation operator (e.g. `sum()`)
or a function aggregating over time (any function ending in `_over_time`),
@@ -370,10 +509,21 @@ counter resets when your target restarts.

For each input time series, `resets(v range-vector)` returns the number of
counter resets within the provided time range as an instant vector. Any
-decrease in the value between two consecutive samples is interpreted as a
-counter reset.
+decrease in the value between two consecutive float samples is interpreted as a
+counter reset. A reset in a native histogram is detected in a more complex way:
+Any decrease in any bucket, including the zero bucket, or in the count of
+observations constitutes a counter reset, but also the disappearance of any
+previously populated bucket, an increase in bucket resolution, or a decrease of
+the zero-bucket width.

-`resets` should only be used with counters.
+`resets` should only be used with counters and counter-like native
+histograms.
+
+If the range vector contains a mix of float and histogram samples for the same
+series, counter resets are detected separately and their numbers added up. The
+change from a float to a histogram sample is _not_ considered a counter
+reset. Each float sample is compared to the next float sample, and each
+histogram is compared to the next histogram.

## `round()`

@@ -395,7 +545,7 @@ have exactly one element, `scalar` will return `NaN`.
## `sort()`

`sort(v instant-vector)` returns vector elements sorted by their sample values,
-in ascending order.
+in ascending order. Native histograms are sorted by their sum of observations.

## `sort_desc()`

@@ -414,7 +564,8 @@ expression is to be evaluated.
## `timestamp()`

`timestamp(v instant-vector)` returns the timestamp of each of the samples of
-the given vector as the number of seconds since January 1, 1970 UTC.
+the given vector as the number of seconds since January 1, 1970 UTC. It also
+works with histogram samples.

## `vector()`

@@ -438,12 +589,16 @@ over time and return an instant vector with per-series aggregation results:
* `quantile_over_time(scalar, range-vector)`: the φ-quantile (0 ≤ φ ≤ 1) of the values in the specified interval.
* `stddev_over_time(range-vector)`: the population standard deviation of the values in the specified interval.
* `stdvar_over_time(range-vector)`: the population standard variance of the values in the specified interval.
-* `last_over_time(range-vector)`: the most recent point value in specified interval.
+* `last_over_time(range-vector)`: the most recent point value in the specified interval. * `present_over_time(range-vector)`: the value 1 for any series in the specified interval. Note that all values in the specified interval have the same weight in the aggregation even if the values are not equally spaced throughout the interval. +`avg_over_time`, `sum_over_time`, `count_over_time`, `last_over_time`, and +`present_over_time` handle native histograms as expected. All other functions +ignore histogram samples. + ## Trigonometric Functions The trigonometric functions work in radians: diff --git a/docs/querying/operators.md b/docs/querying/operators.md index 9c1249b464..c691a0f1a8 100644 --- a/docs/querying/operators.md +++ b/docs/querying/operators.md @@ -306,3 +306,35 @@ highest to lowest. Operators on the same precedence level are left-associative. For example, `2 * 3 % 2` is equivalent to `(2 * 3) % 2`. However `^` is right associative, so `2 ^ 3 ^ 2` is equivalent to `2 ^ (3 ^ 2)`. + +## Operators for native histograms + +Native histograms are an experimental feature. Ingesting native histograms has +to be enabled via a [feature flag](../feature_flags/#native-histograms). Once +native histograms have been ingested, they can be queried (even after the +feature flag has been disabled again). However, the operator support for native +histograms is still very limited. + +Logical/set binary operators work as expected even if histogram samples are +involved. They only check for the existence of a vector element and don't +change their behavior depending on the sample type of an element (float or +histogram). The `count` aggregation operator works similarly. + +The binary `+` and `-` operators between two native histograms and the `sum` +and `avg` aggregation operators to aggregate native histograms are fully +supported. Even if the histograms involved have different bucket layouts, the +buckets are automatically converted appropriately so that the operation can be +performed. (With the currently supported bucket schemas, that's always +possible.) If either operator has to aggregate a mix of histogram samples and +float samples, the corresponding vector element is removed from the output +vector entirely. + +The binary `*` operator works between a native histogram and a float in any +order, while the binary `/` operator can be used between a native histogram +and a float in that exact order. + +All other operators (and unmentioned cases for the above operators) do not +behave in a meaningful way. They either treat the histogram sample as if it +were a float sample of value 0, or (in case of arithmetic operations between a +scalar and a vector) they leave the histogram sample unchanged. This behavior +will change to a meaningful one before native histograms are a stable feature. diff --git a/docs/querying/remote_read_api.md b/docs/querying/remote_read_api.md index aae8e5d1b0..e3dd133069 100644 --- a/docs/querying/remote_read_api.md +++ b/docs/querying/remote_read_api.md @@ -5,7 +5,7 @@ sort_rank: 7 # Remote Read API -This is not currently considered par of the stable API and is subject to change +This is not currently considered part of the stable API and is subject to change even between non-major version releases of Prometheus. ## Format overview @@ -65,8 +65,6 @@ Note: Names of query parameters that may be repeated end with `[]`. This API provides data read functionality from Prometheus. This interface expects [snappy](https://github.com/google/snappy) compression. 
The API definition is located [here](https://github.com/prometheus/prometheus/blob/master/prompb/remote.proto). -/// Can you clarify what you mean by this? -/// https://github.com/prometheus/prometheus/pull/7266#discussion_r426456791 Can we talk a little bit how negotiation works of sampled vs streamed ? Requests are made to the following endpoint. ``` @@ -74,12 +72,10 @@ Requests are made to the following endpoint. ``` ### Samples -/// Does it return a message that includes a list, or does it return a list of raw samples? This returns a message that includes a list of raw samples. ### Streamed Chunks -/// This is a little much detail, the relevant point is they're the internal implementation of the chunks. These streamed chunks utilize an XOR algorithm inspired by the [Gorilla](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf) compression to encode the chunks. However, it provides resolution to the millisecond instead of to the second. diff --git a/docs/stability.md b/docs/stability.md index 7bb8e31840..1fd2e51e0c 100644 --- a/docs/stability.md +++ b/docs/stability.md @@ -1,6 +1,6 @@ --- title: API Stability -sort_rank: 10 +sort_rank: 11 --- # API Stability Guarantees @@ -18,12 +18,13 @@ Things considered stable for 2.x: * Configuration file format (minus the service discovery remote read/write, see below) * Rule/alert file format * Console template syntax and semantics +* Remote write sending, per the [1.0 specification](https://prometheus.io/docs/concepts/remote_write_spec/). Things considered unstable for 2.x: * Any feature listed as experimental or subject to change, including: * The [`holt_winters` PromQL function](https://github.com/prometheus/prometheus/issues/2458) - * Remote read, remote write and the remote read endpoint + * Remote write receiving, remote read and the remote read endpoint * Server-side HTTPS and basic authentication * Service discovery integrations, with the exception of `static_configs` and `file_sd_configs` * Go APIs of packages that are part of the server diff --git a/documentation/examples/custom-sd/adapter-usage/main.go b/documentation/examples/custom-sd/adapter-usage/main.go index a3136f08e8..ae656db193 100644 --- a/documentation/examples/custom-sd/adapter-usage/main.go +++ b/documentation/examples/custom-sd/adapter-usage/main.go @@ -25,10 +25,10 @@ import ( "strings" "time" + "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/common/model" - "gopkg.in/alecthomas/kingpin.v2" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/documentation/examples/custom-sd/adapter" diff --git a/documentation/examples/prometheus-kubernetes.yml b/documentation/examples/prometheus-kubernetes.yml index 3d3861ab4f..9a62287342 100644 --- a/documentation/examples/prometheus-kubernetes.yml +++ b/documentation/examples/prometheus-kubernetes.yml @@ -57,11 +57,6 @@ scrape_configs: regex: default;kubernetes;https # Scrape config for nodes (kubelet). - # - # Rather than connecting directly to the node, the scrape is proxied though the - # Kubernetes apiserver. This means it will work if Prometheus is running out of - # cluster, or can't connect to nodes for some other reason (e.g. because of - # firewalling). - job_name: "kubernetes-nodes" # Default to scraping over https.
If required, just disable this or change to diff --git a/documentation/examples/prometheus-ovhcloud.yml b/documentation/examples/prometheus-ovhcloud.yml new file mode 100644 index 0000000000..21facad1ca --- /dev/null +++ b/documentation/examples/prometheus-ovhcloud.yml @@ -0,0 +1,16 @@ +# An example scrape configuration for running Prometheus with Ovhcloud. +scrape_configs: + - job_name: 'ovhcloud' + ovhcloud_sd_configs: + - service: vps + endpoint: ovh-eu + application_key: XXX + application_secret: XXX + consumer_key: XXX + refresh_interval: 1m + - service: dedicated_server + endpoint: ovh-eu + application_key: XXX + application_secret: XXX + consumer_key: XXX + refresh_interval: 1m diff --git a/documentation/examples/rbac-setup.yml b/documentation/examples/rbac-setup.yml index 350d3eda28..e02c0ca319 100644 --- a/documentation/examples/rbac-setup.yml +++ b/documentation/examples/rbac-setup.yml @@ -33,6 +33,15 @@ metadata: name: prometheus namespace: default --- +apiVersion: v1 +kind: Secret +metadata: + name: prometheus-sa-token + namespace: default + annotations: + kubernetes.io/service-account.name: prometheus +type: kubernetes.io/service-account-token +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: diff --git a/documentation/examples/remote_storage/example_write_adapter/server.go b/documentation/examples/remote_storage/example_write_adapter/server.go index dc2bd0e70a..48c0a9571f 100644 --- a/documentation/examples/remote_storage/example_write_adapter/server.go +++ b/documentation/examples/remote_storage/example_write_adapter/server.go @@ -49,6 +49,11 @@ func main() { } fmt.Printf("\tExemplar: %+v %f %d\n", m, e.Value, e.Timestamp) } + + for _, hp := range ts.Histograms { + h := remote.HistogramProtoToHistogram(hp) + fmt.Printf("\tHistogram: %s\n", h.String()) + } } }) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 02b4e674ab..b4c8e077d6 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -3,62 +3,68 @@ module github.com/prometheus/prometheus/documentation/examples/remote_storage go 1.18 require ( + github.com/alecthomas/kingpin/v2 v2.3.2 github.com/go-kit/log v0.2.1 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/influxdata/influxdb v1.10.0 - github.com/prometheus/client_golang v1.13.0 - github.com/prometheus/common v0.37.0 - github.com/stretchr/testify v1.8.0 - gopkg.in/alecthomas/kingpin.v2 v2.2.6 + github.com/influxdata/influxdb v1.11.2 + github.com/prometheus/client_golang v1.16.0 + github.com/prometheus/common v0.44.0 + github.com/prometheus/prometheus v0.45.0 + github.com/stretchr/testify v1.8.4 ) require ( - github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect - github.com/aws/aws-sdk-go v1.44.72 // indirect + github.com/aws/aws-sdk-go v1.44.276 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/felixge/httpsnoop 
v1.0.3 // indirect - github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect - github.com/kr/pretty v0.2.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.34.0 // indirect - go.opentelemetry.io/otel v1.9.0 // indirect - go.opentelemetry.io/otel/metric v0.31.0 // indirect - go.opentelemetry.io/otel/trace v1.9.0 // indirect - go.uber.org/atomic v1.9.0 // indirect - go.uber.org/goleak v1.1.12 // indirect - golang.org/x/net v0.0.0-20220809184613-07c6da5e1ced // indirect - golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 // indirect - golang.org/x/text v0.3.7 // indirect - golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 // indirect + github.com/prometheus/procfs v0.10.1 // indirect + github.com/xhit/go-str2duration/v2 v2.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 // indirect + go.opentelemetry.io/otel v1.16.0 // indirect + go.opentelemetry.io/otel/metric v1.16.0 // indirect + go.opentelemetry.io/otel/trace v1.16.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/goleak v1.2.1 // indirect + golang.org/x/crypto v0.8.0 // indirect + golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/oauth2 v0.8.0 // indirect + golang.org/x/sys v0.8.0 // indirect + golang.org/x/text v0.9.0 // indirect + golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/protobuf v1.30.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) -require ( - github.com/prometheus/prometheus v0.38.0 - golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7 // indirect -) - exclude ( // These excludes are needed because of some weird version collision. // Feel free to try removing them after future dependency updates. 
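The module changes above replace `gopkg.in/alecthomas/kingpin.v2` with `github.com/alecthomas/kingpin/v2`, matching the import swaps in the example sources. The flag-definition calls used by these examples work the same under both paths; a minimal Go sketch of the migrated usage (hypothetical flag name, not code from this repository):

```
package main

import (
	"fmt"

	// Previously imported as "gopkg.in/alecthomas/kingpin.v2".
	"github.com/alecthomas/kingpin/v2"
)

// listenAddr is a hypothetical flag; Flag/Default/String are unchanged
// between the old and new import paths.
var listenAddr = kingpin.Flag("web.listen-address", "Address to listen on.").
	Default(":9201").String()

func main() {
	kingpin.Parse()
	fmt.Println("listening on", *listenAddr)
}
```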
diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index f5e5ddeb8f..2ade3a1b7c 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -1,82 +1,88 @@ -cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 h1:gVXuXcWd1i4C2Ruxe321aU+IKGaStvGB/S90PUPB/W8= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1/go.mod h1:DffdKW9RFqa5VgmsjUOsS7UE7eiA5iAvYUs63bhKQ0M= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 h1:T8quHYlUGyb/oqtSTwqlCr1ilJHrDv+ZtpSfo+hm1BU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1/go.mod h1:gLa1CL2RNE4s7M3yopJ/p0iq5DdY6Yv5ZUt9MTRZOQM= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= -github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk= +github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= +github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 h1:oPdPEZFSbl7oSPEAIPMPBMUmiL+mqgzBJwM/9qYcwNg= +github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1/go.mod h1:4qFor3D/HDsvBME35Xy9rwW9DecL+M2sNw1ybjPtwA0= +github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= +github.com/alecthomas/kingpin/v2 v2.3.2 h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU= +github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod 
h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.72 h1:i7J5XT7pjBjtl1OrdIhiQHzsG89wkZCcM1HhyK++3DI= -github.com/aws/aws-sdk-go v1.44.72/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.276 h1:ywPlx9C5Yc482dUgAZ9bHpQ6onVvJvYE9FJWsNDCEy0= +github.com/aws/aws-sdk-go v1.44.276/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74 h1:zlUubfBUxApscKFsF4VSvvfhsBNTBu0eF/ddvpo96yk= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= -github.com/digitalocean/godo v1.82.0 h1:lqAit46H1CqJGjh7LDbsamng/UMBME5rvmfH3Vb5Yy8= -github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= -github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE= +github.com/digitalocean/godo v1.99.0 h1:gUHO7n9bDaZFWvbzOum4bXE0/09ZuYA9yA8idQHX57E= +github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= +github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= +github.com/docker/docker v24.0.2+incompatible h1:eATx+oLz9WdNVkQrr0qjQ8HvRJ4bOOxfzEo8R+dA3cg= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= -github.com/emicklei/go-restful v2.16.0+incompatible h1:rgqiKNjTnFQA6kkhFe16D8epTksy9HQ1MyrbDXSdYhM= -github.com/envoyproxy/go-control-plane v0.10.3 
h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= -github.com/envoyproxy/protoc-gen-validate v0.6.7 h1:qcZcULcd/abmQg6dwigimCNEyi4gg31M/xaciQlDml8= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= +github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= +github.com/envoyproxy/protoc-gen-validate v1.0.1 h1:kt9FtLiooDc0vbwTLhdg3dyNX1K9Qwa1EK9LcD4jVUQ= +github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs= -github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU= -github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= 
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= -github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -88,39 +94,43 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= +github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/gophercloud/gophercloud v0.25.0 h1:C3Oae7y0fUVQGSsBrb3zliAjdX+riCSEh4lNMejFNI4= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gophercloud/gophercloud v1.4.0 h1:RqEu43vaX0lb0LanZr5BylK5ICVxjpFFoc0sxivyuHU= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 h1:uirlL/j72L93RhV4+mkWhjv0cov2I0MIgPOG9rMDr1k= -github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= -github.com/hashicorp/consul/api v1.14.0 h1:Y64GIJ8hYTu+tuGekwO4G4ardXoiCivX9wv1iP/kihk= 
+github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= +github.com/hashicorp/consul/api v1.21.0 h1:WMR2JiyuaQWRAMFaOGiYfY4Q4HRpyYRe/oYQofjyduM= github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= -github.com/hashicorp/go-immutable-radix v1.3.0 h1:8exGP7ego3OmkfksihtSouGMZ+hQrhxx+FVELeXpVPE= -github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= +github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/nomad/api v0.0.0-20220809212729-939d643fec2c h1:lV5A4cLQr1Bh1xGSSQ2R0fDRK4GZnfXxYia4Q7aaTXc= -github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY= -github.com/hetznercloud/hcloud-go v1.35.2 h1:eEDtmDiI2plZ2UQmj4YpiYse5XbtpXOUBpAdIOLxzgE= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/influxdata/influxdb v1.10.0 h1:8xDpt8KO3lzrzf/ss+l8r42AGUZvoITu5824berK7SE= -github.com/influxdata/influxdb v1.10.0/go.mod h1:IVPuoA2pOOxau/NguX7ipW0Jp9Bn+dMWlo0+VOscevU= -github.com/ionos-cloud/sdk-go/v6 v6.1.2 h1:es5R5sVmjHFrYNBbJfAeHF+16GheaJMyc63xWxIAec4= +github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= +github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f h1:yxjcAZRuYymIDC0W4IQHgTe9EQdu2BsjPlVmKwyVZT4= +github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= +github.com/hetznercloud/hcloud-go v1.45.1 h1:nl0OOklFfQT5J6AaNIOhl5Ruh3fhmGmhvZEqHbibVuk= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/influxdata/influxdb v1.11.2 h1:qOF3uQN1mDfJNEKwbAgJsqehf8IXgKok2vlGm736oGo= +github.com/influxdata/influxdb v1.11.2/go.mod h1:eUMkLTE2vQwvSk6KGMrTBLKPaqSuczuelGbggigMPFw= +github.com/ionos-cloud/sdk-go/v6 v6.1.7 h1:uVG1Q/ZDJ7YmCI9Oevpue9xJEH5UrUMyXv8gm7NTxIw= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -132,30 +142,30 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/julienschmidt/httprouter 
v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b h1:iNjcivnc6lhbvJA3LD622NPrUponluJrBWPIwGG/3Bg= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/linode/linodego v1.8.0 h1:7B2UaWu6C48tZZZrtINWRElAcwzk4TLnL9USjKf3xm0= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/linode/linodego v1.17.0 h1:aWS98f0jUoY2lhsEuBxRdVkqyGM0nazPd68AEDF0EvU= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -164,7 +174,6 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= @@ -172,6 +181,9 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -182,78 +194,78 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= -github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= 
-github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/prometheus v0.38.0 h1:YSiJ5gDZmXnOntPRyHn1wb/6I1Frasj9dw57XowIqeA= -github.com/prometheus/prometheus v0.38.0/go.mod h1:2zHO5FtRhM+iu995gwKIb99EXxjeZEuXpKUTIRq4YI0= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9 h1:0roa6gXKgyta64uqh52AQG3wzZXH21unn+ltzQSXML0= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/prometheus v0.45.0 h1:O/uG+Nw4kNxx/jDPxmjsSDd+9Ohql6E7ZSY1x5x/0KI= +github.com/prometheus/prometheus v0.45.0/go.mod h1:jC5hyO8ItJBnDWGecbEucMyXjzxGv9cxsxsjS9u5s1w= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.17 h1:1WuWJu7/e8SqK+uQl7lfk/N/oMZTL2NE/TJsNKRNMc4= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= 
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= +github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.34.0 h1:9NkMW03wwEzPtP/KciZ4Ozu/Uz5ZA7kfqXJIObnrjGU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.34.0/go.mod h1:548ZsYzmT4PL4zWKRd8q/N4z0Wxzn/ZxUE+lkEpwWQA= -go.opentelemetry.io/otel v1.9.0 h1:8WZNQFIB2a71LnANS9JeyidJKKGOOremcUtb/OtHISw= -go.opentelemetry.io/otel v1.9.0/go.mod h1:np4EoPGzoPs3O67xUVNoPPcmSvsfOxNlNA4F4AC+0Eo= -go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs= -go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= -go.opentelemetry.io/otel/trace v1.9.0 h1:oZaCNJUjWcg60VXWee8lJKlqhPbXAPB51URuR47pQYc= -go.opentelemetry.io/otel/trace v1.9.0/go.mod h1:2737Q0MuG8q1uILYm2YYVkAyLtOofiTNGg6VODnOiPo= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0/go.mod h1:XiYsayHc36K3EByOO6nbAXnAWbrUxdjUROCEeeROOH8= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= +go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= +go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= +go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -263,25 +275,24 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220809184613-07c6da5e1ced h1:3dYNDff0VT5xj+mbj2XucFst9WKk6PdGOrb9n+SbIvw= -golang.org/x/net v0.0.0-20220809184613-07c6da5e1ced/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7 h1:dtndE8FcEta75/4kHF3AbpuWzV6f1LjnLrM4pe2SZrw= -golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= +golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -294,33 +305,35 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 h1:v1W7bwXHsnLLloWYTVEdvGvA7BHMeBYsPcF0GLDxIRs= -golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 h1:ftMN5LMiBFjbzleLqtoBZk7KdJwhuybIU+FckUHgoyQ= -golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -329,8 +342,8 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto 
v0.0.0-20220808204814-fd01256a5276 h1:7PEE9xCtufpGJzrqweakEEnTh7YFELmnKm/ee+5jmfQ= -google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 h1:khxVcsk/FhnzxMKOyD+TDGwjbEOpcPuIpmafPGFmhMA= +google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -339,16 +352,14 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -360,13 +371,13 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.24.3 h1:tt55QEmKd6L2k5DP6G/ZzdMQKvG5ro4H4teClqm0sTY= -k8s.io/apimachinery v0.24.3 h1:hrFiNSA2cBZqllakVYyH/VyEh4B581bQRmqATJSeQTg= -k8s.io/client-go v0.24.3 h1:Nl1840+6p4JqkFWEW2LnMKU667BUxw03REfLAVhuKQY= +k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ= +k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ= +k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog/v2 v2.70.0 h1:GMmmjoFOrNepPN0ZeGCzvD2Gh5IKRwdFx8W5PBxVTQU= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= -k8s.io/utils 
v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= +k8s.io/utils v0.0.0-20230308161112-d77c459e9343 h1:m7tbIjXGcGIAtpmQr7/NAi7RsWoW3E7Zcm4jI1HicTc= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= diff --git a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go index fffbc9c2ae..e84ed9e129 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go @@ -184,11 +184,11 @@ func (c *Client) buildCommand(q *prompb.Query) (string, error) { } func escapeSingleQuotes(str string) string { - return strings.Replace(str, `'`, `\'`, -1) + return strings.ReplaceAll(str, `'`, `\'`) } func escapeSlashes(str string) string { - return strings.Replace(str, `/`, `\/`, -1) + return strings.ReplaceAll(str, `/`, `\/`) } func mergeResult(labelsToSeries map[string]*prompb.TimeSeries, results []influx.Result) error { @@ -290,13 +290,14 @@ func mergeSamples(a, b []prompb.Sample) []prompb.Sample { result := make([]prompb.Sample, 0, len(a)+len(b)) i, j := 0, 0 for i < len(a) && j < len(b) { - if a[i].Timestamp < b[j].Timestamp { + switch { + case a[i].Timestamp < b[j].Timestamp: result = append(result, a[i]) i++ - } else if a[i].Timestamp > b[j].Timestamp { + case a[i].Timestamp > b[j].Timestamp: result = append(result, b[j]) j++ - } else { + default: result = append(result, a[i]) i++ j++ diff --git a/documentation/examples/remote_storage/remote_storage_adapter/main.go b/documentation/examples/remote_storage/remote_storage_adapter/main.go index eac2ec409c..85ef1839a1 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/main.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/main.go @@ -25,6 +25,7 @@ import ( "sync" "time" + "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/gogo/protobuf/proto" @@ -35,7 +36,6 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/common/promlog" "github.com/prometheus/common/promlog/flag" - "gopkg.in/alecthomas/kingpin.v2" "github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/graphite" "github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/influxdb" diff --git a/documentation/images/architecture.svg b/documentation/images/architecture.svg index df93e13cb2..4e1e85995d 100644 --- a/documentation/images/architecture.svg +++ b/documentation/images/architecture.svg @@ -1,2 +1,4 @@ + + -
[architecture.svg text layer elided: drawio-exported diagram labels, unchanged between old and new — Prometheus Server (Retrieval, Storage, PromQL), HDD / SSD, Pushgateway, Short-lived jobs, Jobs / Exporters, Service Discovery (DNS, Kubernetes, Consul, custom integration), Alertmanager, PagerDuty, Email, Web UI, Grafana, API clients; edge labels "pull metrics", "find targets", "push alerts", "notify". Only the embedded fallback text changes, from "[Not supported by viewer]" to "Text is not SVG - cannot display".]
\ No newline at end of file diff --git a/documentation/images/internal_architecture.svg b/documentation/images/internal_architecture.svg index 5948186a7d..1242548ddb 100644 --- a/documentation/images/internal_architecture.svg +++ b/documentation/images/internal_architecture.svg @@ -1,2 +1,4 @@
[internal_architecture.svg text layer elided: drawio-exported diagram labels, unchanged between old and new — Prometheus Server containing Fanout Storage, Local Storage (disk), Remote Storage, Remote Read Endpoints, Remote Write Endpoints, Rule Manager, Notifier, Notifier Discovery, Alertmanagers, Scrape Manager, Scrape Discovery, Service Discovery, Targets, PromQL, Web API/UI, Reload Handler, Termination Handler, (most other components), Web Clients; edge labels "read/write series data", "write series data", "append rule results", "send alerts", "discover Alertmanager targets", "update Alertmanager targets", "scrape metrics", "append samples", "update scrape targets", "discover scrape targets", "read series data", "query", "reload", "terminate", "view/control". Only the embedded fallback text changes, from "[Not supported by viewer]" to "Text is not SVG - cannot display".]
\ No newline at end of file diff --git a/documentation/images/prometheus-logo.svg b/documentation/images/prometheus-logo.svg new file mode 100644 index 0000000000..026f9e5bcc --- /dev/null +++ b/documentation/images/prometheus-logo.svg @@ -0,0 +1,50 @@ + + + +image/svg+xml diff --git a/documentation/prometheus-mixin/alerts.libsonnet b/documentation/prometheus-mixin/alerts.libsonnet index 0ee5d83c7a..3efb0f27d1 100644 --- a/documentation/prometheus-mixin/alerts.libsonnet +++ b/documentation/prometheus-mixin/alerts.libsonnet @@ -20,6 +20,20 @@ description: 'Prometheus %(prometheusName)s has failed to reload its configuration.' % $._config, }, }, + { + alert: 'PrometheusSDRefreshFailure', + expr: ||| + increase(prometheus_sd_refresh_failures_total{%(prometheusSelector)s}[10m]) > 0 + ||| % $._config, + 'for': '20m', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Failed Prometheus SD refresh.', + description: 'Prometheus %(prometheusName)s has failed to refresh SD with mechanism {{$labels.mechanism}}.' % $._config, + }, + }, { alert: 'PrometheusNotificationQueueRunningFull', expr: ||| diff --git a/documentation/prometheus-mixin/dashboards.libsonnet b/documentation/prometheus-mixin/dashboards.libsonnet index b95f13e0a0..d99dac35d1 100644 --- a/documentation/prometheus-mixin/dashboards.libsonnet +++ b/documentation/prometheus-mixin/dashboards.libsonnet @@ -27,7 +27,7 @@ local template = grafana.template; instance: { alias: 'Instance' }, version: { alias: 'Version' }, 'Value #A': { alias: 'Count', type: 'hidden' }, - 'Value #B': { alias: 'Uptime' }, + 'Value #B': { alias: 'Uptime', type: 'number', unit: 's' }, }) ) ) @@ -314,7 +314,7 @@ local template = grafana.template; template.new( 'cluster', '$datasource', - 'label_values(kube_pod_container_info{image=~".*prometheus.*"}, cluster)' % $._config, + 'label_values(prometheus_build_info, cluster)' % $._config, refresh='time', current={ selected: true, diff --git a/go.mod b/go.mod index 071bef831b..4597ee4201 100644 --- a/go.mod +++ b/go.mod @@ -1,153 +1,166 @@ module github.com/prometheus/prometheus -go 1.18 +go 1.19 require ( github.com/Azure/azure-sdk-for-go v65.0.0+incompatible - github.com/Azure/go-autorest/autorest v0.11.28 - github.com/Azure/go-autorest/autorest/adal v0.9.21 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 + github.com/Azure/go-autorest/autorest v0.11.29 + github.com/Azure/go-autorest/autorest/adal v0.9.23 + github.com/alecthomas/kingpin/v2 v2.3.2 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 - github.com/aws/aws-sdk-go v1.44.109 - github.com/cespare/xxhash/v2 v2.1.2 + github.com/aws/aws-sdk-go v1.44.284 + github.com/cespare/xxhash/v2 v2.2.0 github.com/dennwc/varint v1.0.0 - github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 - github.com/digitalocean/godo v1.84.1 - github.com/docker/docker v20.10.18+incompatible + github.com/digitalocean/godo v1.99.0 + github.com/docker/docker v24.0.2+incompatible github.com/edsrzf/mmap-go v1.1.0 - github.com/envoyproxy/go-control-plane v0.10.3 - github.com/envoyproxy/protoc-gen-validate v0.6.8 - github.com/fsnotify/fsnotify v1.5.4 + github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f + github.com/envoyproxy/protoc-gen-validate v1.0.1 + github.com/fsnotify/fsnotify v1.6.0 github.com/go-kit/log v0.2.1 - github.com/go-logfmt/logfmt v0.5.1 - github.com/go-openapi/strfmt v0.21.3 + github.com/go-logfmt/logfmt v0.6.0 + github.com/go-openapi/strfmt v0.21.7 
github.com/go-zookeeper/zk v1.0.3 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/google/pprof v0.0.0-20220829040838-70bd9ae97f40 - github.com/gophercloud/gophercloud v1.0.0 - github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6 + github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 + github.com/gophercloud/gophercloud v1.4.0 + github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/hashicorp/consul/api v1.15.2 - github.com/hashicorp/nomad/api v0.0.0-20220921012004-ddeeb1040edf - github.com/hetznercloud/hcloud-go v1.35.3 - github.com/ionos-cloud/sdk-go/v6 v6.1.3 + github.com/hashicorp/consul/api v1.21.0 + github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f + github.com/hetznercloud/hcloud-go v1.47.0 + github.com/ionos-cloud/sdk-go/v6 v6.1.7 github.com/json-iterator/go v1.1.12 - github.com/kolo/xmlrpc v0.0.0-20220919000247-3377102c83bd - github.com/linode/linodego v1.9.3 - github.com/miekg/dns v1.1.50 + github.com/klauspost/compress v1.15.12 + github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b + github.com/linode/linodego v1.17.2 + github.com/miekg/dns v1.1.54 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/oklog/run v1.1.0 github.com/oklog/ulid v1.3.1 + github.com/ovh/go-ovh v1.4.1 github.com/pkg/errors v0.9.1 - github.com/prometheus/alertmanager v0.24.0 - github.com/prometheus/client_golang v1.13.0 - github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.37.0 + github.com/prometheus/alertmanager v0.25.0 + github.com/prometheus/client_golang v1.16.0 + github.com/prometheus/client_model v0.4.0 + github.com/prometheus/common v0.44.0 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 - github.com/prometheus/exporter-toolkit v0.7.1 - github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9 + github.com/prometheus/exporter-toolkit v0.10.0 + github.com/scaleway/scaleway-sdk-go v1.0.0-beta.17 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 - github.com/stretchr/testify v1.8.0 + github.com/stretchr/testify v1.8.4 github.com/vultr/govultr/v2 v2.17.2 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0 - go.opentelemetry.io/otel v1.10.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.10.0 - go.opentelemetry.io/otel/sdk v1.10.0 - go.opentelemetry.io/otel/trace v1.10.0 - go.uber.org/atomic v1.10.0 - go.uber.org/automaxprocs v1.5.1 - go.uber.org/goleak v1.2.0 - golang.org/x/net v0.0.0-20220920203100-d0c6ba3f52d9 - golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 - golang.org/x/sync v0.0.0-20220907140024-f12130a52804 - golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 - golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45 - golang.org/x/tools v0.1.12 - google.golang.org/api v0.96.0 - google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 - google.golang.org/grpc v1.49.0 - google.golang.org/protobuf v1.28.1 - gopkg.in/alecthomas/kingpin.v2 v2.2.6 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 + go.opentelemetry.io/otel v1.16.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0 + go.opentelemetry.io/otel/sdk v1.16.0 + 
go.opentelemetry.io/otel/trace v1.16.0 + go.uber.org/atomic v1.11.0 + go.uber.org/automaxprocs v1.5.2 + go.uber.org/goleak v1.2.1 + golang.org/x/net v0.11.0 + golang.org/x/oauth2 v0.9.0 + golang.org/x/sync v0.2.0 + golang.org/x/sys v0.9.0 + golang.org/x/time v0.3.0 + golang.org/x/tools v0.9.3 + google.golang.org/api v0.114.0 + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 + google.golang.org/grpc v1.56.1 + google.golang.org/protobuf v1.30.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.25.2 - k8s.io/apimachinery v0.25.2 - k8s.io/client-go v0.25.2 + k8s.io/api v0.26.2 + k8s.io/apimachinery v0.26.2 + k8s.io/client-go v0.26.2 k8s.io/klog v1.0.0 - k8s.io/klog/v2 v2.80.0 + k8s.io/klog/v2 v2.100.1 ) require ( - cloud.google.com/go/compute v1.7.0 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/stretchr/objx v0.5.0 // indirect + github.com/xhit/go-str2duration/v2 v2.1.0 // indirect +) + +require ( + cloud.google.com/go/compute v1.19.1 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/Microsoft/go-winio v0.5.1 // indirect - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect - github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect - github.com/armon/go-metrics v0.3.10 // indirect - github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect + github.com/Microsoft/go-winio v0.6.0 // indirect + github.com/armon/go-metrics v0.4.1 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.1.3 // indirect - github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/docker/distribution v2.7.1+incompatible // indirect + github.com/docker/distribution v2.8.1+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect - github.com/docker/go-units v0.4.0 // indirect - github.com/emicklei/go-restful/v3 v3.8.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/emicklei/go-restful/v3 v3.10.1 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect - github.com/fatih/color v1.13.0 // indirect + github.com/fatih/color v1.14.1 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect github.com/ghodss/yaml v1.0.0 // indirect - github.com/go-kit/kit v0.10.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-kit/kit v0.12.0 // indirect + github.com/go-logr/logr v1.2.4 // indirect 
github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.21.2 // indirect - github.com/go-openapi/errors v0.20.2 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.6 // indirect - github.com/go-openapi/loads v0.21.1 // indirect - github.com/go-openapi/spec v0.20.4 // indirect - github.com/go-openapi/swag v0.21.1 // indirect - github.com/go-openapi/validate v0.21.0 // indirect - github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 // indirect - github.com/golang-jwt/jwt/v4 v4.2.0 // indirect - github.com/golang/glog v1.0.0 // indirect + github.com/go-openapi/analysis v0.21.4 // indirect + github.com/go-openapi/errors v0.20.3 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/loads v0.21.2 // indirect + github.com/go-openapi/spec v0.20.8 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-openapi/validate v0.22.1 // indirect + github.com/go-resty/resty/v2 v2.7.0 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/golang/glog v1.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/go-cmp v0.5.8 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/gnostic v0.6.9 // indirect + github.com/google/go-cmp v0.5.9 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect - github.com/googleapis/gax-go/v2 v2.4.0 // indirect + github.com/google/uuid v1.3.0 + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect + github.com/googleapis/gax-go/v2 v2.7.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0 // indirect github.com/hashicorp/cronexpr v1.1.1 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-hclog v0.14.1 // indirect - github.com/hashicorp/go-immutable-radix v1.3.0 // indirect - github.com/hashicorp/go-retryablehttp v0.7.1 // indirect + github.com/hashicorp/go-hclog v1.4.0 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.2 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect - github.com/hashicorp/golang-lru v0.5.4 // indirect - github.com/hashicorp/serf v0.9.7 // indirect - github.com/imdario/mergo v0.3.12 // indirect + github.com/hashicorp/golang-lru v0.6.0 // indirect + github.com/hashicorp/serf v0.10.1 // indirect + github.com/imdario/mergo v0.3.13 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + github.com/matttproud/golang_protobuf_extensions 
v1.0.4 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect @@ -158,27 +171,25 @@ require ( github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect - github.com/sirupsen/logrus v1.8.1 // indirect + github.com/prometheus/procfs v0.10.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - go.mongodb.org/mongo-driver v1.10.2 // indirect - go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect - go.opentelemetry.io/otel/metric v0.32.0 // indirect + go.mongodb.org/mongo-driver v1.11.3 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect + go.opentelemetry.io/otel/metric v1.16.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect - golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect - golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e - golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect - golang.org/x/text v0.3.7 // indirect + golang.org/x/crypto v0.10.0 // indirect + golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 + golang.org/x/mod v0.10.0 // indirect + golang.org/x/term v0.9.0 // indirect + golang.org/x/text v0.10.0 // indirect google.golang.org/appengine v1.6.7 // indirect - gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/ini.v1 v1.66.6 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gotest.tools/v3 v3.0.3 // indirect - k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect - k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect + k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect + k8s.io/utils v0.0.0-20230308161112-d77c459e9343 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index 17b06cc4b7..57f9f82c69 100644 --- a/go.sum +++ b/go.sum @@ -3,7 +3,6 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -13,38 +12,20 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go 
v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -54,20 +35,24 @@ cloud.google.com/go/storage v1.5.0/go.mod 
h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= -github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= -github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk= -github.com/Azure/go-autorest/autorest/adal v0.9.21/go.mod h1:zua7mBUaCc5YnSLKYgGJR/w5ePdMDA6H56upLsHzA9U= +github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= +github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= +github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= +github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= +github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= @@ -81,23 +66,24 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= 
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= -github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= +github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/alecthomas/kingpin/v2 v2.3.2 h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU= +github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -109,37 +95,35 @@ github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo= -github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod 
h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= -github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.43.11/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/aws/aws-sdk-go v1.44.109 h1:+Na5JPeS0kiEHoBp5Umcuuf+IDqXqD0lXnM920E31YI= -github.com/aws/aws-sdk-go v1.44.109/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.284 h1:Oc5Kubi43/VCkerlt3ZU3KpBju6BpNkoG3s7E8vj/O8= +github.com/aws/aws-sdk-go v1.44.284/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= -github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/cespare/xxhash/v2 v2.1.2/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -148,20 +132,20 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= @@ -173,19 +157,17 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= 
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 h1:9cOfvEwjQxdwKuNDTQSaMKNRvwKwgZG+U4HrjeRKHso= -github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/digitalocean/godo v1.84.1 h1:VgPsuxhrO9pUygvij6qOhqXfAkxAsDZYRpmjSDMEaHo= -github.com/digitalocean/godo v1.84.1/go.mod h1:BPCqvwbjbGqxuUnIKB4EvS/AX7IDnNmt5fwvIkWo+ew= +github.com/digitalocean/godo v1.99.0 h1:gUHO7n9bDaZFWvbzOum4bXE0/09ZuYA9yA8idQHX57E= +github.com/digitalocean/godo v1.99.0/go.mod h1:SsS2oXo2rznfM/nORlZ/6JaUJZFhmKTib1YhopUc8NA= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v20.10.18+incompatible h1:SN84VYXTBNGn92T/QwIRPlum9zfemfitN7pbsp26WSc= -github.com/docker/docker v20.10.18+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= +github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.2+incompatible h1:eATx+oLz9WdNVkQrr0qjQ8HvRJ4bOOxfzEo8R+dA3cg= +github.com/docker/docker v24.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -194,38 +176,35 @@ github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFP github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= -github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= -github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= +github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane 
v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f h1:7T++XKzy4xg7PKy+bM+Sa9/oe1OC88yz2hXQUISoXfA= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= -github.com/envoyproxy/protoc-gen-validate v0.6.8 h1:B2cR/FAaiMtYDHv5BQpaqtkjGuWQIgr2KQZtHQ7f6i8= -github.com/envoyproxy/protoc-gen-validate v0.6.8/go.mod h1:0ZMblUx0cxNoWRswEEXoj9kHBmqX8pxGweMiyIAfR6A= +github.com/envoyproxy/protoc-gen-validate v1.0.1 h1:kt9FtLiooDc0vbwTLhdg3dyNX1K9Qwa1EK9LcD4jVUQ= +github.com/envoyproxy/protoc-gen-validate v1.0.1/go.mod h1:0vj8bNkYbSTNS2PIyH87KZaeN4x9zpL9Qt8fQC7d+vs= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= +github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= 
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -233,59 +212,61 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= +github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/analysis v0.21.2 h1:hXFrOYFHUAMQdu6zwAiKKJHJQ8kqZs1ux/ru1P1wLJU= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= +github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= +github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.2 h1:dxy7PGTqEh94zj2E3h1cUmQQWiM1+aeCROfAr02EmK8= github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc= +github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= 
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= -github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlDW0= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= -github.com/go-openapi/runtime v0.23.1/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk= -github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M= +github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= +github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= +github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU= +github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= -github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= -github.com/go-openapi/strfmt v0.21.3 h1:xwhj5X6CjXEZZHMWy1zKJxvW9AfHC9pkyUjLvHtKG7o= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= +github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= +github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/validate v0.21.0 h1:+Wqk39yKOhfpLqNLEC0/eViCkzM5FVXVqrvt526+wcI= -github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= -github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY= 
-github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= +github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= +github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= -github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= @@ -312,8 +293,7 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/goccy/go-yaml v1.9.5/go.mod h1:U/jl18uSupI5rdI2jmuCswEA2htH9eXfferR3KfscvA= -github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -321,11 +301,12 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= -github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache 
v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -339,8 +320,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -356,19 +335,18 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= +github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -381,9 +359,8 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp 
v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -391,8 +368,6 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -400,37 +375,22 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20220829040838-70bd9ae97f40 h1:ykKxL12NZd3JmWZnyqarJGsF73M9Xhtrik/FEtEeFRE= -github.com/google/pprof v0.0.0-20220829040838-70bd9ae97f40/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 h1:hR7/MlvK23p6+lIw9SN1TigNLn9ZnF3W4SYRKq2gAHs= +github.com/google/pprof v0.0.0-20230602150820-91b7bce49751/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0 h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw= -github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gophercloud/gophercloud v1.0.0 h1:9nTGx0jizmHxDobe4mck89FyQHVyA3CaXLIUSGJjP9k= -github.com/gophercloud/gophercloud v1.0.0/go.mod h1:Q8fZtyi5zZxPS/j9aj3sSxtvj41AdQMDwyo1myduD5c= +github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/gophercloud/gophercloud v1.4.0 h1:RqEu43vaX0lb0LanZr5BylK5ICVxjpFFoc0sxivyuHU= +github.com/gophercloud/gophercloud v1.4.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -438,90 +398,84 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6 h1:A3dhViTeFDSQcGOXuUi6ukCQSMyDtDISBp2z6OOo2YM= -github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= 
+github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 h1:/sDbPb60SusIXjiJGYLUoS/rAQurQmvGWmwn2bBPM9c= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1/go.mod h1:G+WkljZi4mflcqVxYSgvt8MNctRQHjEH8ubKtt1Ka3w= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0 h1:1JYBfzqrWPcCclBwxFCPAou9n+q86mfnu7NAeHfte7A= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0/go.mod h1:YDZoGHuwE+ov0c8smSH49WLF3F2LaWnYYuDVd+EWrc0= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.15.2 h1:3Q/pDqvJ7udgt/60QOOW/p/PeKioQN+ncYzzCdN2av0= -github.com/hashicorp/consul/api v1.15.2/go.mod h1:v6nvB10borjOuIwNRZYPZiHKrTM/AyrGtd0WVVodKM8= +github.com/hashicorp/consul/api v1.21.0 h1:WMR2JiyuaQWRAMFaOGiYfY4Q4HRpyYRe/oYQofjyduM= +github.com/hashicorp/consul/api v1.21.0/go.mod h1:f8zVJwBcLdr1IQnfdfszjUM0xzp31Zl3bpws3pL9uFM= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.11.0 h1:HRzj8YSCln2yGgCumN5CL8lYlD3gBurnervJRJAZyC4= -github.com/hashicorp/consul/sdk v0.11.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= +github.com/hashicorp/consul/sdk v0.13.1 h1:EygWVWWMczTzXGpO93awkHFzfUka6hLYJ0qhETd+6lY= github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= -github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= +github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod 
h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.0 h1:8exGP7ego3OmkfksihtSouGMZ+hQrhxx+FVELeXpVPE= -github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= -github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= -github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= +github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= -github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= +github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/logutils 
v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM= -github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/nomad/api v0.0.0-20220921012004-ddeeb1040edf h1:l/EZ57iRPNs8vd8c9qH0dB4Q+IiZHJouLAgxJ5j25tU= -github.com/hashicorp/nomad/api v0.0.0-20220921012004-ddeeb1040edf/go.mod h1:Z0U0rpbh4Qlkgqu3iRDcfJBA+r3FgoeD1BfigmZhfzM= +github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= +github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= +github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f h1:yxjcAZRuYymIDC0W4IQHgTe9EQdu2BsjPlVmKwyVZT4= +github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f/go.mod h1:Xjd3OXUTfsWbCCBsQd3EdfPTz5evDi+fxqdvpN+WqQg= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY= -github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/hetznercloud/hcloud-go v1.35.3 h1:WCmFAhLRooih2QHAsbCbEdpIHnshQQmrPqsr3rHE1Ow= -github.com/hetznercloud/hcloud-go v1.35.3/go.mod h1:mepQwR6va27S3UQthaEPGS86jtzSY9xWL1e9dyxXpgA= +github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= +github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= +github.com/hetznercloud/hcloud-go v1.47.0 h1:WMZDwLPtMZwOLWIgERHrrrTzRFdHx0hTygYVQ4VWHW4= +github.com/hetznercloud/hcloud-go v1.47.0/go.mod h1:zSpmBnxIdb5oMdbpVg1Q977Cq2qiwERkjj3jqRbHH5U= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/ionos-cloud/sdk-go/v6 v6.1.3 h1:vb6yqdpiqaytvreM0bsn2pXw+1YDvEk2RKSmBAQvgDQ= -github.com/ionos-cloud/sdk-go/v6 v6.1.3/go.mod h1:Ox3W0iiEz0GHnfY9e5LmAxwklsxguuNFEUSu0gVRTME= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +github.com/ionos-cloud/sdk-go/v6 v6.1.7 h1:uVG1Q/ZDJ7YmCI9Oevpue9xJEH5UrUMyXv8gm7NTxIw= 
+github.com/ionos-cloud/sdk-go/v6 v6.1.7/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -553,29 +507,28 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/kolo/xmlrpc v0.0.0-20220919000247-3377102c83bd h1:b1taQnM42dp3NdiiQwfmM1WyyucHayZSKN5R0PRYWL0= -github.com/kolo/xmlrpc v0.0.0-20220919000247-3377102c83bd/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= +github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM= +github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.9.3 h1:+lxNZw4avRxhCqGjwfPgQ2PvMT+vOL0OMsTdzixR7hQ= -github.com/linode/linodego v1.9.3/go.mod h1:h6AuFR/JpqwwM/vkj7s8KV3iGN8/jxn+zc437F8SZ8w= -github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/lyft/protoc-gen-star 
v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/linode/linodego v1.17.2 h1:b32dj4662PGG5P9qVa6nBezccWdqgukndlMIuPGq1CQ= +github.com/linode/linodego v1.17.2/go.mod h1:C2iyT3Vg2O2sPxkWka4XAQ5WSUtm5LmTZ3Adw43Ra7Q= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -587,27 +540,29 @@ github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kN github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.50 
h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= -github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI= +github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -615,14 +570,12 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= @@ -660,9 +613,9 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= +github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= +github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= @@ -672,11 +625,12 @@ github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.2.0/go.mod 
h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM= +github.com/ovh/go-ovh v1.4.1/go.mod h1:6bL6pPyUT7tBfI0pqOegJgRjgjuO+mOo+MyXd1EEC0M= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= @@ -686,20 +640,20 @@ github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAv github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= -github.com/prometheus/alertmanager v0.24.0 h1:HBWR3lk4uy3ys+naDZthDdV7yEsxpaNeZuUS+hJgrOw= -github.com/prometheus/alertmanager v0.24.0/go.mod h1:r6fy/D7FRuZh5YbnX6J3MBY0eI4Pb5yPYS7/bPSXXqI= +github.com/prometheus/alertmanager v0.25.0 h1:vbXKUR6PYRiZPRIKfmXaG+dmCKG52RtPL4Btl8hQGvg= +github.com/prometheus/alertmanager v0.25.0/go.mod h1:MEZ3rFVHqKZsw7IcNS/m4AWZeXThmJhumpiWR4eHU/w= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -707,16 +661,16 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod 
h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= -github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= @@ -724,45 +678,40 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= -github.com/prometheus/exporter-toolkit v0.7.1 h1:c6RXaK8xBVercEeUQ4tRNL8UGWzDHfvj9dseo1FcK1Y= -github.com/prometheus/exporter-toolkit v0.7.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= +github.com/prometheus/exporter-toolkit v0.10.0 h1:yOAzZTi4M22ZzVxD+fhy1URTuNRj/36uQJJ5S8IPza8= +github.com/prometheus/exporter-toolkit v0.10.0/go.mod h1:+sVFzuvV5JDyw+Ih6p3zFxZNVnKQa3x5qPmDSiPu4ZY= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= -github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9 h1:0roa6gXKgyta64uqh52AQG3wzZXH21unn+ltzQSXML0= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.17 h1:1WuWJu7/e8SqK+uQl7lfk/N/oMZTL2NE/TJsNKRNMc4= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.17/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shoenig/test v0.3.1 h1:dhGZztS6nQuvJ0o0RtUiQHaEO4hhArh/WmWwik3Ols0= +github.com/shoenig/test v0.6.6 h1:Oe8TPH9wAbv++YPNDKJWUnI8Q4PPWCx3UbOfH+FxiMU= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= 
github.com/simonpasquier/klog-gokit v0.3.0 h1:TkFK21cbwDRS+CiystjqbAiq5ubJcVTk9hLUck5Ntcs= github.com/simonpasquier/klog-gokit v0.3.0/go.mod h1:+SUlDQNrhVtGt2FieaqNftzzk8P72zpWlACateWxA9k= github.com/simonpasquier/klog-gokit/v3 v3.0.0 h1:J0QrVhAULISHWN05PeXX/xMqJBjnpl2fAuO8uHdQGsA= @@ -772,19 +721,11 @@ github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -796,8 +737,9 @@ github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3 github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -805,8 +747,12 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 
h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -820,24 +766,25 @@ github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+ github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= -go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.10.2 h1:4Wk3cnqOrQCn0P92L3/mmurMxzdvWWs5J9jinAVKD+k= -go.mongodb.org/mongo-driver v1.10.2/go.mod h1:z4XpeoU6w+9Vht+jAFyLgVrD+jGSQQe0+CBWFHNiHt8= +go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y= 
+go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -845,40 +792,37 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0 h1:qZ3KzA4qPzLBDtQyPk4ydjlg8zvXbNysnFHaVMKJbVo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0/go.mod h1:14Oo79mRwusSI02L0EfG3Gp1uF3+1wSL+D4zDysxyqs= -go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= -go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.10.0 h1:S8DedULB3gp93Rh+9Z+7NTEv+6Id/KYS7LDyipZ9iCE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.10.0/go.mod h1:5WV40MLWwvWlGP7Xm8g3pMcg0pKOUY609qxJn8y7LmM= -go.opentelemetry.io/otel/metric v0.32.0 h1:lh5KMDB8xlMM4kwE38vlZJ3rZeiWrjw3As1vclfC01k= -go.opentelemetry.io/otel/metric v0.32.0/go.mod h1:PVDNTt297p8ehm949jsIzd+Z2bIZJYQQG/uuHTeWFHY= -go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= -go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= -go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= -go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0/go.mod h1:XiYsayHc36K3EByOO6nbAXnAWbrUxdjUROCEeeROOH8= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= +go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 h1:t4ZwRPU+emrcvM2e9DHd0Fsf0JTPVcbfa/BhTDF03d0= 
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0/go.mod h1:vLarbg68dH2Wa77g71zmKQqlQ8+8Rq3GRG31uc0WcWI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 h1:cbsD4cUcviQGXdw8+bo5x2wazq10SKz8hEbtCRPcU78= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0/go.mod h1:JgXSGah17croqhJfhByOLVY719k1emAXC8MVhCIJlRs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 h1:TVQp/bboR4mhZSav+MdgXB8FaRho1RC8UwVn3T0vjVc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0/go.mod h1:I33vtIe0sR96wfrUcilIzLoA3mLHhRmz9S9Te0S3gDo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0 h1:iqjq9LAB8aK++sKVcELezzn655JnBNdsDhghU4G/So8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0/go.mod h1:hGXzO5bhhSHZnKvrDaXB82Y9DRFour0Nz/KrBh7reWw= +go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE= +go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= +go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/automaxprocs v1.5.1 h1:e1YG66Lrk73dn4qhg8WFSvhF0JuFQF0ERIp4rpuV8Qk= -go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME= +go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= @@ -891,20 +835,17 @@ golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaE golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -915,8 +856,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -929,9 
+870,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -940,13 +878,9 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -982,58 +916,29 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220907135653-1e95f45603a7/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20220920203100-d0c6ba3f52d9 h1:asZqf0wXastQr+DudYagQS8uBO8bHKeYD1vbAvGmFL8= -golang.org/x/net v0.0.0-20220920203100-d0c6ba3f52d9/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU= +golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 h1:lxqLZaMad/dJHMFZH0NiNpiEZI/nhgWhe4wgzpE+MuA= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.9.0 h1:BPpt2kU7oMRq3kCHAA1tbSEshXRw1LpG2ztgDwrzuAs= +golang.org/x/oauth2 v0.9.0/go.mod h1:qYgFZaFiu6Wg24azG8bdV52QJXJGbZzIIsRCdVKzbLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1046,10 +951,9 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220907140024-f12130a52804 h1:0SH2R3f1b1VmIMG7BXbEZCBUu2dKmHschSmjqGUrW8A= -golang.org/x/sync v0.0.0-20220907140024-f12130a52804/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys 
v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1075,7 +979,6 @@ golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1096,74 +999,54 @@ golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908150016-7ac13a9a928d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 h1:h+EGohizhe9XlX18rfpa8k8RAc5XyaeamM+0VHRd4lc= -golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28= +golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45 h1:yuLAip3bfURHClMG9VBdzPrQvCWjWiWUTBGV+/fCbUs= -golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1215,32 +1098,14 @@ golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM= +golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -1256,31 +1121,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod 
h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.96.0 h1:F60cuQPJq7K7FzsxMYHAUJSiXh2oKctHxBMbDygxhfM= -google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1319,61 +1161,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod 
h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 h1:mmbq5q8M1t7dhkLw320YK4PsOXm6jdnUAkErImaIqOg= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1390,29 +1181,13 @@ google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod 
h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ= +google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1426,10 +1201,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1443,10 +1216,9 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= -gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/telebot.v3 v3.0.0/go.mod h1:7rExV8/0mDDNu9epSrDm/8j22KLaActH1Tbee6YjzWg= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= @@ -1463,6 +1235,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= @@ -1476,21 +1249,21 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.25.2 h1:v6G8RyFcwf0HR5jQGIAYlvtRNrxMJQG1xJzaSeVnIS8= -k8s.io/api v0.25.2/go.mod h1:qP1Rn4sCVFwx/xIhe+we2cwBLTXNcheRyYXwajonhy0= -k8s.io/apimachinery v0.25.2 h1:WbxfAjCx+AeN8Ilp9joWnyJ6xu9OMeS/fsfjK/5zaQs= -k8s.io/apimachinery v0.25.2/go.mod h1:hqqA1X0bsgsxI6dXsJ4HnNTBOmJNxyPp8dw3u2fSHwA= -k8s.io/client-go v0.25.2 h1:SUPp9p5CwM0yXGQrwYurw9LWz+YtMwhWd0GqOsSiefo= -k8s.io/client-go v0.25.2/go.mod h1:i7cNU7N+yGQmJkewcRD2+Vuj4iz7b30kI8OcL3horQ4= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ= +k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU= +k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ= +k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= 
+k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI= +k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= +k8s.io/utils v0.0.0-20230308161112-d77c459e9343 h1:m7tbIjXGcGIAtpmQr7/NAi7RsWoW3E7Zcm4jI1HicTc= +k8s.io/utils v0.0.0-20230308161112-d77c459e9343/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go new file mode 100644 index 0000000000..2ced090167 --- /dev/null +++ b/model/histogram/float_histogram.go @@ -0,0 +1,979 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package histogram + +import ( + "fmt" + "strings" +) + +// FloatHistogram is similar to Histogram but uses float64 for all +// counts. Additionally, bucket counts are absolute and not deltas. +// +// A FloatHistogram is needed by PromQL to handle operations that might result +// in fractional counts. Since the counts in a histogram are unlikely to be too +// large to be represented precisely by a float64, a FloatHistogram can also be +// used to represent a histogram with integer counts and thus serves as a more +// generalized representation. +type FloatHistogram struct { + // Counter reset information. + CounterResetHint CounterResetHint + // Currently valid schema numbers are -4 <= n <= 8. They are all for + // base-2 bucket schemas, where 1 is a bucket boundary in each case, and + // then each power of two is divided into 2^n logarithmic buckets. Or + // in other words, each bucket boundary is the previous boundary times + // 2^(2^-n). + Schema int32 + // Width of the zero bucket. + ZeroThreshold float64 + // Observations falling into the zero bucket. Must be zero or positive. + ZeroCount float64 + // Total number of observations. Must be zero or positive. + Count float64 + // Sum of observations. 
This is also used as the stale marker. + Sum float64 + // Spans for positive and negative buckets (see Span below). + PositiveSpans, NegativeSpans []Span + // Observation counts in buckets. Each represents an absolute count and + // must be zero or positive. + PositiveBuckets, NegativeBuckets []float64 +} + +// Copy returns a deep copy of the Histogram. +func (h *FloatHistogram) Copy() *FloatHistogram { + c := *h + + if h.PositiveSpans != nil { + c.PositiveSpans = make([]Span, len(h.PositiveSpans)) + copy(c.PositiveSpans, h.PositiveSpans) + } + if h.NegativeSpans != nil { + c.NegativeSpans = make([]Span, len(h.NegativeSpans)) + copy(c.NegativeSpans, h.NegativeSpans) + } + if h.PositiveBuckets != nil { + c.PositiveBuckets = make([]float64, len(h.PositiveBuckets)) + copy(c.PositiveBuckets, h.PositiveBuckets) + } + if h.NegativeBuckets != nil { + c.NegativeBuckets = make([]float64, len(h.NegativeBuckets)) + copy(c.NegativeBuckets, h.NegativeBuckets) + } + + return &c +} + +// CopyToSchema works like Copy, but the returned deep copy has the provided +// target schema, which must be ≤ the original schema (i.e. it must have a lower +// resolution). +func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram { + if targetSchema == h.Schema { + // Fast path. + return h.Copy() + } + if targetSchema > h.Schema { + panic(fmt.Errorf("cannot copy from schema %d to %d", h.Schema, targetSchema)) + } + c := FloatHistogram{ + Schema: targetSchema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: h.ZeroCount, + Count: h.Count, + Sum: h.Sum, + } + + // TODO(beorn7): This is a straightforward implementation using merging + // iterators for the original buckets and then adding one merged bucket + // after another to the newly created FloatHistogram. It's quite possible + // that a more involved implementation performs much better, which we + // could do if this code path turns out to be performance-critical. + var iInSpan, index int32 + for iSpan, iBucket, it := -1, -1, h.floatBucketIterator(true, 0, targetSchema); it.Next(); { + b := it.At() + c.PositiveSpans, c.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket( + b, c.PositiveSpans, c.PositiveBuckets, iSpan, iBucket, iInSpan, index, + ) + index = b.Index + } + for iSpan, iBucket, it := -1, -1, h.floatBucketIterator(false, 0, targetSchema); it.Next(); { + b := it.At() + c.NegativeSpans, c.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket( + b, c.NegativeSpans, c.NegativeBuckets, iSpan, iBucket, iInSpan, index, + ) + index = b.Index + } + + return &c +} + +// String returns a string representation of the Histogram. +func (h *FloatHistogram) String() string { + var sb strings.Builder + fmt.Fprintf(&sb, "{count:%g, sum:%g", h.Count, h.Sum) + + var nBuckets []Bucket[float64] + for it := h.NegativeBucketIterator(); it.Next(); { + bucket := it.At() + if bucket.Count != 0 { + nBuckets = append(nBuckets, it.At()) + } + } + for i := len(nBuckets) - 1; i >= 0; i-- { + fmt.Fprintf(&sb, ", %s", nBuckets[i].String()) + } + + if h.ZeroCount != 0 { + fmt.Fprintf(&sb, ", %s", h.ZeroBucket().String()) + } + + for it := h.PositiveBucketIterator(); it.Next(); { + bucket := it.At() + if bucket.Count != 0 { + fmt.Fprintf(&sb, ", %s", bucket.String()) + } + } + + sb.WriteRune('}') + return sb.String() +} + +// ZeroBucket returns the zero bucket.
+func (h *FloatHistogram) ZeroBucket() Bucket[float64] { + return Bucket[float64]{ + Lower: -h.ZeroThreshold, + Upper: h.ZeroThreshold, + LowerInclusive: true, + UpperInclusive: true, + Count: h.ZeroCount, + } +} + +// Mul multiplies the FloatHistogram by the provided factor, i.e. it scales all +// bucket counts including the zero bucket and the count and the sum of +// observations. The bucket layout stays the same. This method changes the +// receiving histogram directly (rather than acting on a copy). It returns a +// pointer to the receiving histogram for convenience. +func (h *FloatHistogram) Mul(factor float64) *FloatHistogram { + h.ZeroCount *= factor + h.Count *= factor + h.Sum *= factor + for i := range h.PositiveBuckets { + h.PositiveBuckets[i] *= factor + } + for i := range h.NegativeBuckets { + h.NegativeBuckets[i] *= factor + } + return h +} + +// Div works like Mul but divides instead of multiplies. +// When dividing by 0, everything will be set to Inf. +func (h *FloatHistogram) Div(scalar float64) *FloatHistogram { + h.ZeroCount /= scalar + h.Count /= scalar + h.Sum /= scalar + for i := range h.PositiveBuckets { + h.PositiveBuckets[i] /= scalar + } + for i := range h.NegativeBuckets { + h.NegativeBuckets[i] /= scalar + } + return h +} + +// Add adds the provided other histogram to the receiving histogram. Count, Sum, +// and buckets from the other histogram are added to the corresponding +// components of the receiving histogram. Buckets in the other histogram that do +// not exist in the receiving histogram are inserted into the latter. The +// resulting histogram might have buckets with a population of zero or directly +// adjacent spans (offset=0). To normalize those, call the Compact method. +// +// The method reconciles differences in the zero threshold and in the schema, +// but the schema of the other histogram must be ≥ the schema of the receiving +// histogram (i.e. must have an equal or higher resolution). This means that the +// schema of the receiving histogram won't change. Its zero threshold, however, +// will change if needed. The other histogram will not be modified in any case. +// +// This method returns a pointer to the receiving histogram for convenience. +func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { + switch { + case other.CounterResetHint == h.CounterResetHint: + // Adding apples to apples, all good. No need to change anything. + case h.CounterResetHint == GaugeType: + // Adding something else to a gauge. That's probably OK. Outcome is a gauge. + // Nothing to do since the receiver is already marked as gauge. + case other.CounterResetHint == GaugeType: + // Similar to before, but this time the receiver is "something else" and we have to change it to gauge. + h.CounterResetHint = GaugeType + case h.CounterResetHint == UnknownCounterReset: + // With the receiver's CounterResetHint being "unknown", this could still be legitimate + // if the caller knows what they are doing. Outcome is then again "unknown". + // No need to do anything since the receiver's CounterResetHint is already "unknown". + case other.CounterResetHint == UnknownCounterReset: + // Similar to before, but now we have to set the receiver's CounterResetHint to "unknown". + h.CounterResetHint = UnknownCounterReset + default: + // All other cases shouldn't actually happen. + // They are a direct collision of CounterReset and NotCounterReset. + // Conservatively set the CounterResetHint to "unknown" and issue a warning.
+ h.CounterResetHint = UnknownCounterReset + // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place + } + + otherZeroCount := h.reconcileZeroBuckets(other) + h.ZeroCount += otherZeroCount + h.Count += other.Count + h.Sum += other.Sum + + // TODO(beorn7): If needed, this can be optimized by inspecting the + // spans in other and creating missing buckets in h in batches. + var iInSpan, index int32 + for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); { + b := it.At() + h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket( + b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index, + ) + index = b.Index + } + for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); { + b := it.At() + h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket( + b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index, + ) + index = b.Index + } + return h +} + +// Sub works like Add but subtracts the other histogram. +func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { + otherZeroCount := h.reconcileZeroBuckets(other) + h.ZeroCount -= otherZeroCount + h.Count -= other.Count + h.Sum -= other.Sum + + // TODO(beorn7): If needed, this can be optimized by inspecting the + // spans in other and creating missing buckets in h in batches. + var iInSpan, index int32 + for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); { + b := it.At() + b.Count *= -1 + h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket( + b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index, + ) + index = b.Index + } + for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); { + b := it.At() + b.Count *= -1 + h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket( + b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index, + ) + index = b.Index + } + return h +} + +// Equals returns true if the given float histogram matches exactly. +// Exact match is when there are no new buckets (even empty) and no missing buckets, +// and all the bucket values match. The two histograms may encode empty stretches +// differently (e.g. with zero-length spans in between), but they must represent the +// same bucket layout to match. +func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool { + if h2 == nil { + return false + } + + if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold || + h.ZeroCount != h2.ZeroCount || h.Count != h2.Count || h.Sum != h2.Sum { + return false + } + + if !spansMatch(h.PositiveSpans, h2.PositiveSpans) { + return false + } + if !spansMatch(h.NegativeSpans, h2.NegativeSpans) { + return false + } + + if !bucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) { + return false + } + if !bucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) { + return false + } + + return true +} + +// addBucket takes the "coordinates" of the last bucket that was handled and +// adds the provided bucket after it. If a corresponding bucket exists, the +// count is added. If not, the bucket is inserted. The updated slices and the +// coordinates of the inserted or added-to bucket are returned.
+func addBucket( + b Bucket[float64], + spans []Span, buckets []float64, + iSpan, iBucket int, + iInSpan, index int32, +) ( + newSpans []Span, newBuckets []float64, + newISpan, newIBucket int, newIInSpan int32, +) { + if iSpan == -1 { + // First add, check if it is before all spans. + if len(spans) == 0 || spans[0].Offset > b.Index { + // Add bucket before all others. + buckets = append(buckets, 0) + copy(buckets[1:], buckets) + buckets[0] = b.Count + if len(spans) > 0 && spans[0].Offset == b.Index+1 { + spans[0].Length++ + spans[0].Offset-- + return spans, buckets, 0, 0, 0 + } + spans = append(spans, Span{}) + copy(spans[1:], spans) + spans[0] = Span{Offset: b.Index, Length: 1} + if len(spans) > 1 { + // Convert the absolute offset in the formerly + // first span to a relative offset. + spans[1].Offset -= b.Index + 1 + } + return spans, buckets, 0, 0, 0 + } + if spans[0].Offset == b.Index { + // Just add to first bucket. + buckets[0] += b.Count + return spans, buckets, 0, 0, 0 + } + // We are behind the first bucket, so set everything to the + // first bucket and continue normally. + iSpan, iBucket, iInSpan = 0, 0, 0 + index = spans[0].Offset + } + deltaIndex := b.Index - index + for { + remainingInSpan := int32(spans[iSpan].Length) - iInSpan + if deltaIndex < remainingInSpan { + // Bucket is in current span. + iBucket += int(deltaIndex) + iInSpan += deltaIndex + buckets[iBucket] += b.Count + return spans, buckets, iSpan, iBucket, iInSpan + } + deltaIndex -= remainingInSpan + iBucket += int(remainingInSpan) + iSpan++ + if iSpan == len(spans) || deltaIndex < spans[iSpan].Offset { + // Bucket is in gap behind previous span (or there are no further spans). + buckets = append(buckets, 0) + copy(buckets[iBucket+1:], buckets[iBucket:]) + buckets[iBucket] = b.Count + if deltaIndex == 0 { + // Directly after previous span, extend previous span. + if iSpan < len(spans) { + spans[iSpan].Offset-- + } + iSpan-- + iInSpan = int32(spans[iSpan].Length) + spans[iSpan].Length++ + return spans, buckets, iSpan, iBucket, iInSpan + } + if iSpan < len(spans) && deltaIndex == spans[iSpan].Offset-1 { + // Directly before next span, extend next span. + iInSpan = 0 + spans[iSpan].Offset-- + spans[iSpan].Length++ + return spans, buckets, iSpan, iBucket, iInSpan + } + // No next span, or next span is not directly adjacent to new bucket. + // Add new span. + iInSpan = 0 + if iSpan < len(spans) { + spans[iSpan].Offset -= deltaIndex + 1 + } + spans = append(spans, Span{}) + copy(spans[iSpan+1:], spans[iSpan:]) + spans[iSpan] = Span{Length: 1, Offset: deltaIndex} + return spans, buckets, iSpan, iBucket, iInSpan + } + // Try start of next span. + deltaIndex -= spans[iSpan].Offset + iInSpan = 0 + } +} + +// Compact eliminates empty buckets at the beginning and end of each span, then +// merges spans that are consecutive or at most maxEmptyBuckets apart, and +// finally splits spans that contain more consecutive empty buckets than +// maxEmptyBuckets. (The actual implementation might do something more efficient +// but with the same result.) The compaction happens "in place" in the +// receiving histogram, but a pointer to it is returned for convenience. +// +// The ideal value for maxEmptyBuckets depends on circumstances. 
The motivation
+// to set maxEmptyBuckets > 0 is the assumption that it is less overhead to
+// represent very few empty buckets explicitly within one span than to cut the
+// one span into two in order to treat the empty buckets as a gap between the
+// two spans, both in terms of storage requirement as well as in terms of
+// encoding and decoding effort. However, the tradeoffs are subtle. For one,
+// they are different in the exposition format vs. in a TSDB chunk vs. for the
+// in-memory representation as Go types. In the TSDB, as an additional aspect,
+// the span layout is only stored once per chunk, while many histograms with
+// that same chunk layout are then only stored with their buckets (so that even
+// a single empty bucket will be stored many times).
+//
+// For the Go types, an additional Span takes 8 bytes. Similarly, an additional
+// bucket takes 8 bytes. Therefore, with a single separating empty bucket, both
+// options have the same storage requirement, but the single-span solution is
+// easier to iterate through. Still, the safest bet is to use maxEmptyBuckets==0
+// and only use a larger number if you know what you are doing.
+func (h *FloatHistogram) Compact(maxEmptyBuckets int) *FloatHistogram {
+	h.PositiveBuckets, h.PositiveSpans = compactBuckets(
+		h.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, false,
+	)
+	h.NegativeBuckets, h.NegativeSpans = compactBuckets(
+		h.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, false,
+	)
+	return h
+}
+
+// DetectReset returns true if the receiving histogram is missing any buckets
+// that have a non-zero population in the provided previous histogram. It also
+// returns true if any count (in any bucket, in the zero count, or in the count
+// of observations, but NOT the sum of observations) is smaller in the receiving
+// histogram compared to the previous histogram. Otherwise, it returns false.
+//
+// This method shortcuts to true if the CounterResetHint is CounterReset, and to
+// false if it is NotCounterReset. Otherwise, it does the detailed work of
+// detecting a reset.
+//
+// Special behavior in case the Schema or the ZeroThreshold is not the same in
+// both histograms:
+//
+// - A decrease of the ZeroThreshold or an increase of the Schema (i.e. an
+// increase of resolution) can only happen together with a reset. Thus, the
+// method returns true in either case.
+//
+// - Upon an increase of the ZeroThreshold, the buckets in the previous
+// histogram that fall within the new ZeroThreshold are added to the ZeroCount
+// of the previous histogram (without mutating the provided previous
+// histogram). The scenario that a populated bucket of the previous histogram
+// is partially within, partially outside of the new ZeroThreshold, can only
+// happen together with a counter reset and therefore shortcuts to returning
+// true.
+//
+// - Upon a decrease of the Schema, the buckets of the previous histogram are
+// merged so that they match the new, lower-resolution schema (again without
+// mutating the provided previous histogram).
+func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool {
+	if h.CounterResetHint == CounterReset {
+		return true
+	}
+	if h.CounterResetHint == NotCounterReset {
+		return false
+	}
+	// In all other cases of CounterResetHint (UnknownCounterReset and
+	// GaugeType), we go on as we would otherwise, for reasons explained below.
+	//
+	// If the CounterResetHint is UnknownCounterReset, we do not know yet if
+	// this histogram comes with a counter reset. Therefore, we have to do all
+	// the detailed work to find out if there is a counter reset or not.
+	// We do the same if the CounterResetHint is GaugeType, which should not
+	// happen, but PromQL still allows the user to apply functions to gauge
+	// histograms that are only meant for counter histograms. In this case, we
+	// treat the gauge histogram as a counter histogram (and we plan to return
+	// a warning about it to the user).
+	if h.Count < previous.Count {
+		return true
+	}
+	if h.Schema > previous.Schema {
+		return true
+	}
+	if h.ZeroThreshold < previous.ZeroThreshold {
+		// ZeroThreshold decreased.
+		return true
+	}
+	previousZeroCount, newThreshold := previous.zeroCountForLargerThreshold(h.ZeroThreshold)
+	if newThreshold != h.ZeroThreshold {
+		// ZeroThreshold is within a populated bucket in the previous
+		// histogram.
+		return true
+	}
+	if h.ZeroCount < previousZeroCount {
+		return true
+	}
+	currIt := h.floatBucketIterator(true, h.ZeroThreshold, h.Schema)
+	prevIt := previous.floatBucketIterator(true, h.ZeroThreshold, h.Schema)
+	if detectReset(currIt, prevIt) {
+		return true
+	}
+	currIt = h.floatBucketIterator(false, h.ZeroThreshold, h.Schema)
+	prevIt = previous.floatBucketIterator(false, h.ZeroThreshold, h.Schema)
+	return detectReset(currIt, prevIt)
+}
+
+func detectReset(currIt, prevIt BucketIterator[float64]) bool {
+	if !prevIt.Next() {
+		return false // If no buckets in previous histogram, nothing can be reset.
+	}
+	prevBucket := prevIt.At()
+	if !currIt.Next() {
+		// No bucket in current, but at least one in previous
+		// histogram. Check if any of those are non-zero, in which case
+		// this is a reset.
+		for {
+			if prevBucket.Count != 0 {
+				return true
+			}
+			if !prevIt.Next() {
+				return false
+			}
+			prevBucket = prevIt.At()
+		}
+	}
+	currBucket := currIt.At()
+	for {
+		// Forward currIt until we find the bucket corresponding to prevBucket.
+		for currBucket.Index < prevBucket.Index {
+			if !currIt.Next() {
+				// Reached end of currIt early, therefore
+				// previous histogram has a bucket that the
+				// current one does not have. Unless all
+				// remaining buckets in the previous histogram
+				// are unpopulated, this is a reset.
+				for {
+					if prevBucket.Count != 0 {
+						return true
+					}
+					if !prevIt.Next() {
+						return false
+					}
+					prevBucket = prevIt.At()
+				}
+			}
+			currBucket = currIt.At()
+		}
+		if currBucket.Index > prevBucket.Index {
+			// Previous histogram has a bucket the current one does
+			// not have. If it's populated, it's a reset.
+			if prevBucket.Count != 0 {
+				return true
+			}
+		} else {
+			// We have reached corresponding buckets in both iterators.
+			// We can finally compare the counts.
+			if currBucket.Count < prevBucket.Count {
+				return true
+			}
+		}
+		if !prevIt.Next() {
+			// Reached end of prevIt without finding offending buckets.
+			return false
+		}
+		prevBucket = prevIt.At()
+	}
+}
+
+// PositiveBucketIterator returns a BucketIterator to iterate over all positive
+// buckets in ascending order (starting next to the zero bucket and going up).
+func (h *FloatHistogram) PositiveBucketIterator() BucketIterator[float64] {
+	return h.floatBucketIterator(true, 0, h.Schema)
+}
+
+// NegativeBucketIterator returns a BucketIterator to iterate over all negative
+// buckets in descending order (starting next to the zero bucket and going
+// down).
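+//
+// A typical consumption pattern for any of these iterators looks like the
+// following sketch (illustrative, using only the Next/At contract of
+// BucketIterator):
+//
+//	it := h.NegativeBucketIterator()
+//	for it.Next() {
+//		b := it.At()
+//		// b.Lower, b.Upper, and b.Count describe one bucket.
+//	}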
+func (h *FloatHistogram) NegativeBucketIterator() BucketIterator[float64] {
+	return h.floatBucketIterator(false, 0, h.Schema)
+}
+
+// PositiveReverseBucketIterator returns a BucketIterator to iterate over all
+// positive buckets in descending order (starting at the highest bucket and
+// going down towards the zero bucket).
+func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] {
+	return newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true)
+}
+
+// NegativeReverseBucketIterator returns a BucketIterator to iterate over all
+// negative buckets in ascending order (starting at the lowest bucket and going
+// up towards the zero bucket).
+func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] {
+	return newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false)
+}
+
+// AllBucketIterator returns a BucketIterator to iterate over all negative,
+// zero, and positive buckets in ascending order (starting at the lowest bucket
+// and going up). If the highest negative bucket or the lowest positive bucket
+// overlaps with the zero bucket, its upper or lower boundary, respectively, is
+// set to the zero threshold.
+func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] {
+	return &allFloatBucketIterator{
+		h:         h,
+		leftIter:  h.NegativeReverseBucketIterator(),
+		rightIter: h.PositiveBucketIterator(),
+		state:     -1,
+	}
+}
+
+// AllReverseBucketIterator returns a BucketIterator to iterate over all
+// negative, zero, and positive buckets in descending order (starting at the
+// highest bucket and going down). If the highest negative bucket or the lowest
+// positive bucket overlaps with the zero bucket, its upper or lower boundary,
+// respectively, is set to the zero threshold.
+func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] {
+	return &allFloatBucketIterator{
+		h:         h,
+		leftIter:  h.PositiveReverseBucketIterator(),
+		rightIter: h.NegativeBucketIterator(),
+		state:     -1,
+	}
+}
+
+// zeroCountForLargerThreshold returns what the histogram's zero count would be
+// if the ZeroThreshold had the provided larger (or equal) value. If the
+// provided value is less than the histogram's ZeroThreshold, the method panics.
+// If the largerThreshold ends up within a populated bucket of the histogram, it
+// is adjusted upwards to the upper limit of that bucket (all in terms of
+// absolute values) and that bucket's count is included in the returned
+// count. The adjusted threshold is returned, too.
+func (h *FloatHistogram) zeroCountForLargerThreshold(largerThreshold float64) (count, threshold float64) {
+	// Fast path.
+	if largerThreshold == h.ZeroThreshold {
+		return h.ZeroCount, largerThreshold
+	}
+	if largerThreshold < h.ZeroThreshold {
+		panic(fmt.Errorf("new threshold %f is less than old threshold %f", largerThreshold, h.ZeroThreshold))
+	}
+outer:
+	for {
+		count = h.ZeroCount
+		i := h.PositiveBucketIterator()
+		for i.Next() {
+			b := i.At()
+			if b.Lower >= largerThreshold {
+				break
+			}
+			count += b.Count // Bucket to be merged into zero bucket.
+			if b.Upper > largerThreshold {
+				// New threshold ended up within a bucket. If it's
+				// populated, we need to adjust largerThreshold
+				// before we are done here.
+				if b.Count != 0 {
+					largerThreshold = b.Upper
+				}
+				break
+			}
+		}
+		i = h.NegativeBucketIterator()
+		for i.Next() {
+			b := i.At()
+			if b.Upper <= -largerThreshold {
+				break
+			}
+			count += b.Count // Bucket to be merged into zero bucket.
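+			// Note the asymmetry to the positive pass above:
+			// positive buckets are visited in ascending order, so
+			// widening largerThreshold there never affects buckets
+			// that were already counted. Widening it here, however,
+			// can retroactively pull further positive buckets into
+			// the zero bucket, which is why the branch below has to
+			// restart the whole computation.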
+			if b.Lower < -largerThreshold {
+				// New threshold ended up within a bucket. If
+				// it's populated, we need to adjust
+				// largerThreshold and have to redo the whole
+				// thing because the treatment of the positive
+				// buckets is invalid now.
+				if b.Count != 0 {
+					largerThreshold = -b.Lower
+					continue outer
+				}
+				break
+			}
+		}
+		return count, largerThreshold
+	}
+}
+
+// trimBucketsInZeroBucket removes all buckets that are within the zero
+// bucket. It assumes that the zero threshold is at a bucket boundary and that
+// the counts in the buckets to remove are already part of the zero count.
+func (h *FloatHistogram) trimBucketsInZeroBucket() {
+	i := h.PositiveBucketIterator()
+	bucketsIdx := 0
+	for i.Next() {
+		b := i.At()
+		if b.Lower >= h.ZeroThreshold {
+			break
+		}
+		h.PositiveBuckets[bucketsIdx] = 0
+		bucketsIdx++
+	}
+	i = h.NegativeBucketIterator()
+	bucketsIdx = 0
+	for i.Next() {
+		b := i.At()
+		if b.Upper <= -h.ZeroThreshold {
+			break
+		}
+		h.NegativeBuckets[bucketsIdx] = 0
+		bucketsIdx++
+	}
+	// We are abusing Compact to trim the buckets set to zero
+	// above. Premature compacting could cause additional cost, but this
+	// code path is probably rarely used anyway.
+	h.Compact(0)
+}
+
+// reconcileZeroBuckets finds a zero bucket large enough to include the zero
+// buckets of both histograms (the receiving histogram and the other histogram)
+// with a zero threshold that is not within a populated bucket in either
+// histogram. This method modifies the receiving histogram accordingly, but
+// leaves the other histogram as is. Instead, it returns the zero count the
+// other histogram would have if it were modified.
+func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 {
+	otherZeroCount := other.ZeroCount
+	otherZeroThreshold := other.ZeroThreshold
+
+	for otherZeroThreshold != h.ZeroThreshold {
+		if h.ZeroThreshold > otherZeroThreshold {
+			otherZeroCount, otherZeroThreshold = other.zeroCountForLargerThreshold(h.ZeroThreshold)
+		}
+		if otherZeroThreshold > h.ZeroThreshold {
+			h.ZeroCount, h.ZeroThreshold = h.zeroCountForLargerThreshold(otherZeroThreshold)
+			h.trimBucketsInZeroBucket()
+		}
+	}
+	return otherZeroCount
+}
+
+// floatBucketIterator is a low-level constructor for bucket iterators.
+//
+// If positive is true, the returned iterator iterates through the positive
+// buckets, otherwise through the negative buckets.
+//
+// If absoluteStartValue is < the lowest absolute value of any upper bucket
+// boundary, the iterator starts with the first bucket. Otherwise, it will skip
+// all buckets with an absolute value of their upper boundary ≤
+// absoluteStartValue.
+//
+// targetSchema must be ≤ the schema of FloatHistogram (and of course within the
+// legal values for schemas in general). The buckets are merged to match the
+// targetSchema prior to iterating (without mutating FloatHistogram).
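+//
+// For example (illustrative): iterating a histogram with Schema 1 using
+// targetSchema 0 merges each pair of adjacent schema-1 buckets into one
+// schema-0 bucket, summing their counts on the fly.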
+func (h *FloatHistogram) floatBucketIterator(
+	positive bool, absoluteStartValue float64, targetSchema int32,
+) *floatBucketIterator {
+	if targetSchema > h.Schema {
+		panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema))
+	}
+	i := &floatBucketIterator{
+		baseBucketIterator: baseBucketIterator[float64, float64]{
+			schema:   h.Schema,
+			positive: positive,
+		},
+		targetSchema:       targetSchema,
+		absoluteStartValue: absoluteStartValue,
+	}
+	if positive {
+		i.spans = h.PositiveSpans
+		i.buckets = h.PositiveBuckets
+	} else {
+		i.spans = h.NegativeSpans
+		i.buckets = h.NegativeBuckets
+	}
+	return i
+}
+
+// newReverseFloatBucketIterator is a low-level constructor for reverse bucket
+// iterators.
+func newReverseFloatBucketIterator(
+	spans []Span, buckets []float64, schema int32, positive bool,
+) *reverseFloatBucketIterator {
+	r := &reverseFloatBucketIterator{
+		baseBucketIterator: baseBucketIterator[float64, float64]{
+			schema:   schema,
+			spans:    spans,
+			buckets:  buckets,
+			positive: positive,
+		},
+	}
+
+	r.spansIdx = len(r.spans) - 1
+	r.bucketsIdx = len(r.buckets) - 1
+	if r.spansIdx >= 0 {
+		r.idxInSpan = int32(r.spans[r.spansIdx].Length) - 1
+	}
+	r.currIdx = 0
+	for _, s := range r.spans {
+		r.currIdx += s.Offset + int32(s.Length)
+	}
+
+	return r
+}
+
+type floatBucketIterator struct {
+	baseBucketIterator[float64, float64]
+
+	targetSchema       int32   // targetSchema is the schema to merge to and must be ≤ schema.
+	origIdx            int32   // The bucket index within the original schema.
+	absoluteStartValue float64 // Never return buckets with an upper bound ≤ this value.
+}
+
+func (i *floatBucketIterator) Next() bool {
+	if i.spansIdx >= len(i.spans) {
+		return false
+	}
+
+	// Copy all of these into local variables so that we can forward to the
+	// next bucket and then roll back if needed.
+	origIdx, spansIdx, idxInSpan := i.origIdx, i.spansIdx, i.idxInSpan
+	span := i.spans[spansIdx]
+	firstPass := true
+	i.currCount = 0
+
+mergeLoop: // Merge together all buckets from the original schema that fall into one bucket in the targetSchema.
+	for {
+		if i.bucketsIdx == 0 {
+			// Seed origIdx for the first bucket.
+			origIdx = span.Offset
+		} else {
+			origIdx++
+		}
+		for idxInSpan >= span.Length {
+			// We have exhausted the current span and have to find a new
+			// one. We even handle pathologic spans of length 0 here.
+			idxInSpan = 0
+			spansIdx++
+			if spansIdx >= len(i.spans) {
+				if firstPass {
+					return false
+				}
+				break mergeLoop
+			}
+			span = i.spans[spansIdx]
+			origIdx += span.Offset
+		}
+		currIdx := i.targetIdx(origIdx)
+		switch {
+		case firstPass:
+			i.currIdx = currIdx
+			firstPass = false
+		case currIdx != i.currIdx:
+			// Reached next bucket in targetSchema.
+			// Do not actually forward to the next bucket, but break out.
+			break mergeLoop
+		}
+		i.currCount += i.buckets[i.bucketsIdx]
+		idxInSpan++
+		i.bucketsIdx++
+		i.origIdx, i.spansIdx, i.idxInSpan = origIdx, spansIdx, idxInSpan
+		if i.schema == i.targetSchema {
+			// Don't need to test the next bucket for mergeability
+			// if we have no schema change anyway.
+			break mergeLoop
+		}
+	}
+	// Skip buckets before absoluteStartValue.
+	// TODO(beorn7): Maybe do something more efficient than this recursive call.
+	if getBound(i.currIdx, i.targetSchema) <= i.absoluteStartValue {
+		return i.Next()
+	}
+	return true
+}
+
+// targetIdx returns the bucket index within i.targetSchema for the given bucket
+// index within i.schema.
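+//
+// For instance (illustrative), with schema 2 and targetSchema 0, the indexes
+// 1 through 4 within schema 2 all map to index 1 within schema 0, because
+// ((idx-1) >> 2) + 1 == 1 for idx in [1, 4], while idx 5 starts target index 2.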
+func (i *floatBucketIterator) targetIdx(idx int32) int32 { + if i.schema == i.targetSchema { + // Fast path for the common case. The below would yield the same + // result, just with more effort. + return idx + } + return ((idx - 1) >> (i.schema - i.targetSchema)) + 1 +} + +type reverseFloatBucketIterator struct { + baseBucketIterator[float64, float64] + idxInSpan int32 // Changed from uint32 to allow negative values for exhaustion detection. +} + +func (i *reverseFloatBucketIterator) Next() bool { + i.currIdx-- + if i.bucketsIdx < 0 { + return false + } + + for i.idxInSpan < 0 { + // We have exhausted the current span and have to find a new + // one. We'll even handle pathologic spans of length 0. + i.spansIdx-- + i.idxInSpan = int32(i.spans[i.spansIdx].Length) - 1 + i.currIdx -= i.spans[i.spansIdx+1].Offset + } + + i.currCount = i.buckets[i.bucketsIdx] + i.bucketsIdx-- + i.idxInSpan-- + return true +} + +type allFloatBucketIterator struct { + h *FloatHistogram + leftIter, rightIter BucketIterator[float64] + // -1 means we are iterating negative buckets. + // 0 means it is time for the zero bucket. + // 1 means we are iterating positive buckets. + // Anything else means iteration is over. + state int8 + currBucket Bucket[float64] +} + +func (i *allFloatBucketIterator) Next() bool { + switch i.state { + case -1: + if i.leftIter.Next() { + i.currBucket = i.leftIter.At() + switch { + case i.currBucket.Upper < 0 && i.currBucket.Upper > -i.h.ZeroThreshold: + i.currBucket.Upper = -i.h.ZeroThreshold + case i.currBucket.Lower > 0 && i.currBucket.Lower < i.h.ZeroThreshold: + i.currBucket.Lower = i.h.ZeroThreshold + } + return true + } + i.state = 0 + return i.Next() + case 0: + i.state = 1 + if i.h.ZeroCount > 0 { + i.currBucket = Bucket[float64]{ + Lower: -i.h.ZeroThreshold, + Upper: i.h.ZeroThreshold, + LowerInclusive: true, + UpperInclusive: true, + Count: i.h.ZeroCount, + // Index is irrelevant for the zero bucket. + } + return true + } + return i.Next() + case 1: + if i.rightIter.Next() { + i.currBucket = i.rightIter.At() + switch { + case i.currBucket.Lower > 0 && i.currBucket.Lower < i.h.ZeroThreshold: + i.currBucket.Lower = i.h.ZeroThreshold + case i.currBucket.Upper < 0 && i.currBucket.Upper > -i.h.ZeroThreshold: + i.currBucket.Upper = -i.h.ZeroThreshold + } + return true + } + i.state = 42 + return false + } + + return false +} + +func (i *allFloatBucketIterator) At() Bucket[float64] { + return i.currBucket +} diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go new file mode 100644 index 0000000000..ce749b7101 --- /dev/null +++ b/model/histogram/float_histogram_test.go @@ -0,0 +1,2207 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package histogram + +import ( + "fmt" + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestFloatHistogramMul(t *testing.T) { + cases := []struct { + name string + in *FloatHistogram + scale float64 + expected *FloatHistogram + }{ + { + "zero value", + &FloatHistogram{}, + 3.1415, + &FloatHistogram{}, + }, + { + "zero multiplier", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + 0, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 0, + Count: 0, + Sum: 0, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{0, 0, 0, 0}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{0, 0, 0, 0}, + }, + }, + { + "no-op", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + 1, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + }, + { + "double", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + 2, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 3493.3 * 2, + Sum: 2349209.324 * 2, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{2, 6.6, 8.4, 0.2}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{6.2, 6, 1.234e5 * 2, 2000}, + }, + }, + { + "triple", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + 3, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 33, + Count: 90, + Sum: 69, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{3, 0, 9, 12, 21}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{9, 3, 15, 18}, + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + require.Equal(t, c.expected, c.in.Mul(c.scale)) + // Has it also happened in-place? 
+ require.Equal(t, c.expected, c.in) + }) + } +} + +func TestFloatHistogramDiv(t *testing.T) { + cases := []struct { + name string + fh *FloatHistogram + s float64 + expected *FloatHistogram + }{ + { + "zero value", + &FloatHistogram{}, + 3.1415, + &FloatHistogram{}, + }, + { + "zero divisor", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + 0, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: math.Inf(1), + Count: math.Inf(1), + Sum: math.Inf(1), + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1), math.Inf(1)}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1), math.Inf(1)}, + }, + }, + { + "no-op", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + 1, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + }, + { + "half", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + 2, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 15, + Sum: 11.5, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{0.5, 0, 1.5, 2, 3.5}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{1.5, 0.5, 2.5, 3}, + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + require.Equal(t, c.expected, c.fh.Div(c.s)) + // Has it also happened in-place? 
+ require.Equal(t, c.expected, c.fh) + }) + } +} + +func TestFloatHistogramDetectReset(t *testing.T) { + cases := []struct { + name string + previous, current *FloatHistogram + resetExpected bool + }{ + { + "zero values", + &FloatHistogram{}, + &FloatHistogram{}, + false, + }, + { + "no buckets to some buckets", + &FloatHistogram{}, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + false, + }, + { + "some buckets to no buckets", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + &FloatHistogram{}, + true, + }, + { + "one bucket appears, nothing else changes", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 1.23, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + false, + }, + { + "one bucket disappears, nothing else changes", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 1.23, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + true, + }, + { + "an unpopulated bucket disappears, nothing else changes", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + false, + }, + { + "an unpopulated bucket at the end disappears, nothing else changes", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {1, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 1}, {1, 2}}, + PositiveBuckets: []float64{1, 3.3, 4.2}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 
1.234e5, 1000}, + }, + false, + }, + { + "an unpopulated bucket disappears in a histogram with nothing else", + &FloatHistogram{ + PositiveSpans: []Span{{23, 1}}, + PositiveBuckets: []float64{0}, + }, + &FloatHistogram{}, + false, + }, + { + "zero count goes up", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.6, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + false, + }, + { + "zero count goes down", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.4, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + true, + }, + { + "count goes up", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.4, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + false, + }, + { + "count goes down", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.2, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + true, + }, + { + "sum goes up", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349210, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + false, + }, + { + "sum goes down", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + NegativeSpans: 
[]Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349200, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + false, + }, + { + "one positive bucket goes up", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.3, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + false, + }, + { + "one positive bucket goes down", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.1, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + true, + }, + { + "one negative bucket goes up", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3.1, 1.234e5, 1000}, + }, + false, + }, + { + "one negative bucket goes down", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 2.9, 1.234e5, 1000}, + }, + true, + }, + { + "zero threshold decreases", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 1.23, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + &FloatHistogram{ + ZeroThreshold: 0.009, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 1.23, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + true, + }, + { + "zero 
threshold increases without touching any existing buckets",
+			&FloatHistogram{
+				ZeroThreshold:   0.01,
+				ZeroCount:       5.5,
+				Count:           3493.3,
+				Sum:             2349209.324,
+				PositiveSpans:   []Span{{-2, 2}, {1, 3}},
+				PositiveBuckets: []float64{1, 1.23, 3.3, 4.2, 0.1},
+				NegativeSpans:   []Span{{3, 2}, {3, 2}},
+				NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000},
+			},
+			&FloatHistogram{
+				ZeroThreshold:   0.011,
+				ZeroCount:       5.5,
+				Count:           3493.3,
+				Sum:             2349209.324,
+				PositiveSpans:   []Span{{-2, 2}, {1, 3}},
+				PositiveBuckets: []float64{1, 1.23, 3.3, 4.2, 0.1},
+				NegativeSpans:   []Span{{3, 2}, {3, 2}},
+				NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000},
+			},
+			false,
+		},
+		{
+			"zero threshold increases enough to cover existing buckets",
+			&FloatHistogram{
+				ZeroThreshold:   0.01,
+				ZeroCount:       5.5,
+				Count:           3493.3,
+				Sum:             2349209.324,
+				PositiveSpans:   []Span{{-2, 2}, {1, 3}},
+				PositiveBuckets: []float64{1, 1.23, 3.3, 4.2, 0.1},
+				NegativeSpans:   []Span{{3, 2}, {3, 2}},
+				NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000},
+			},
+			&FloatHistogram{
+				ZeroThreshold:   1,
+				ZeroCount:       7.73,
+				Count:           3493.3,
+				Sum:             2349209.324,
+				PositiveSpans:   []Span{{1, 3}},
+				PositiveBuckets: []float64{3.3, 4.2, 0.1},
+				NegativeSpans:   []Span{{3, 2}, {3, 2}},
+				NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000},
+			},
+			false,
+		},
+		{
+			"zero threshold increases into the middle of an existing bucket",
+			&FloatHistogram{
+				ZeroThreshold:   0.01,
+				ZeroCount:       5.5,
+				Count:           3493.3,
+				Sum:             2349209.324,
+				PositiveSpans:   []Span{{-2, 2}, {1, 3}},
+				PositiveBuckets: []float64{1, 1.23, 3.3, 4.2, 0.1},
+				NegativeSpans:   []Span{{3, 2}, {3, 2}},
+				NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000},
+			},
+			&FloatHistogram{
+				ZeroThreshold:   0.3,
+				ZeroCount:       5.5,
+				Count:           3493.3,
+				Sum:             2349209.324,
+				PositiveSpans:   []Span{{-2, 2}, {1, 3}},
+				PositiveBuckets: []float64{1, 1.23, 3.3, 4.2, 0.1},
+				NegativeSpans:   []Span{{3, 2}, {3, 2}},
+				NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000},
+			},
+			true,
+		},
+		{
+			"schema increases without any other changes",
+			&FloatHistogram{
+				ZeroThreshold:   0.01,
+				ZeroCount:       5.5,
+				Count:           3493.3,
+				Sum:             2349209.324,
+				Schema:          0,
+				PositiveSpans:   []Span{{-2, 2}, {1, 3}},
+				PositiveBuckets: []float64{1, 1.23, 3.3, 4.2, 0.1},
+				NegativeSpans:   []Span{{3, 2}, {3, 2}},
+				NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000},
+			},
+			&FloatHistogram{
+				ZeroThreshold:   0.01,
+				ZeroCount:       5.5,
+				Count:           3493.3,
+				Sum:             2349209.324,
+				Schema:          1,
+				PositiveSpans:   []Span{{-5, 4}, {2, 6}},
+				PositiveBuckets: []float64{0.4, 0.6, 1, 0.23, 2, 1.3, 1.2, 3, 0.05, 0.05},
+				NegativeSpans:   []Span{{5, 4}, {6, 4}},
+				NegativeBuckets: []float64{2, 1.1, 2, 1, 0.234e5, 1e5, 500, 500},
+			},
+			true,
+		},
+		{
+			"schema decreases without any other changes",
+			&FloatHistogram{
+				ZeroThreshold:   0.01,
+				ZeroCount:       5.5,
+				Count:           3493.3,
+				Sum:             2349209.324,
+				Schema:          1,
+				PositiveSpans:   []Span{{-5, 4}, {2, 6}},
+				PositiveBuckets: []float64{0.4, 0.6, 1, 0.23, 2, 1.3, 1.2, 3, 0.05, 0.05},
+				NegativeSpans:   []Span{{5, 4}, {6, 4}},
+				NegativeBuckets: []float64{2, 1.1, 2, 1, 0.234e5, 1e5, 500, 500},
+			},
+			&FloatHistogram{
+				ZeroThreshold:   0.01,
+				ZeroCount:       5.5,
+				Count:           3493.3,
+				Sum:             2349209.324,
+				Schema:          0,
+				PositiveSpans:   []Span{{-2, 2}, {1, 3}},
+				PositiveBuckets: []float64{1, 1.23, 3.3, 4.2, 0.1},
+				NegativeSpans:   []Span{{3, 2}, {3, 2}},
+				NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000},
+			},
+			false,
+		},
+		{
+			"schema decreases and a bucket goes up",
+			&FloatHistogram{
+				ZeroThreshold: 0.01,
+				ZeroCount:     5.5,
+				Count:         3493.3,
+				Sum:           2349209.324,
+				Schema:
1, + PositiveSpans: []Span{{-5, 4}, {2, 6}}, + PositiveBuckets: []float64{0.4, 0.6, 1, 0.23, 2, 1.3, 1.2, 3, 0.05, 0.05}, + NegativeSpans: []Span{{5, 4}, {6, 4}}, + NegativeBuckets: []float64{2, 1.1, 2, 1, 0.234e5, 1e5, 500, 500}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + Schema: 0, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 1.23, 4.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + false, + }, + { + "schema decreases and a bucket goes down", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + Schema: 1, + PositiveSpans: []Span{{-5, 4}, {2, 6}}, + PositiveBuckets: []float64{0.4, 0.6, 1, 0.23, 2, 1.3, 1.2, 3, 0.05, 0.05}, + NegativeSpans: []Span{{5, 4}, {6, 4}}, + NegativeBuckets: []float64{2, 1.1, 2, 1, 0.234e5, 1e5, 500, 500}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 5.5, + Count: 3493.3, + Sum: 2349209.324, + Schema: 0, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 1.23, 2.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + true, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + require.Equal(t, c.resetExpected, c.current.DetectReset(c.previous)) + }) + } +} + +func TestFloatHistogramCompact(t *testing.T) { + cases := []struct { + name string + in *FloatHistogram + maxEmptyBuckets int + expected *FloatHistogram + }{ + { + "empty histogram", + &FloatHistogram{}, + 0, + &FloatHistogram{}, + }, + { + "nothing should happen", + &FloatHistogram{ + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + 0, + &FloatHistogram{ + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, + }, + }, + { + "eliminate zero offsets", + &FloatHistogram{ + PositiveSpans: []Span{{-2, 1}, {0, 3}, {0, 1}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + NegativeSpans: []Span{{0, 2}, {0, 2}, {2, 1}, {0, 1}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 3, 4}, + }, + 0, + &FloatHistogram{ + PositiveSpans: []Span{{-2, 5}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + NegativeSpans: []Span{{0, 4}, {2, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 3, 4}, + }, + }, + { + "eliminate zero length", + &FloatHistogram{ + PositiveSpans: []Span{{-2, 2}, {2, 0}, {3, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + NegativeSpans: []Span{{0, 2}, {0, 0}, {2, 0}, {1, 4}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 3, 4}, + }, + 0, + &FloatHistogram{ + PositiveSpans: []Span{{-2, 2}, {5, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + NegativeSpans: []Span{{0, 2}, {3, 4}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 3, 4}, + }, + }, + { + "eliminate multiple zero length spans", + &FloatHistogram{ + PositiveSpans: []Span{{-2, 2}, {2, 0}, {2, 0}, {2, 0}, {3, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + }, + 0, + &FloatHistogram{ + PositiveSpans: []Span{{-2, 2}, {9, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + }, + }, + { + "cut empty buckets at start or end", + &FloatHistogram{ + PositiveSpans: []Span{{-4, 4}, {5, 3}}, + PositiveBuckets: 
[]float64{0, 0, 1, 3.3, 4.2, 0.1, 3.3}, + NegativeSpans: []Span{{0, 2}, {3, 5}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 3, 4, 0}, + }, + 0, + &FloatHistogram{ + PositiveSpans: []Span{{-2, 2}, {5, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + NegativeSpans: []Span{{0, 2}, {3, 4}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 3, 4}, + }, + }, + { + "cut empty buckets at start and end", + &FloatHistogram{ + PositiveSpans: []Span{{-4, 4}, {5, 6}}, + PositiveBuckets: []float64{0, 0, 1, 3.3, 4.2, 0.1, 3.3, 0, 0, 0}, + NegativeSpans: []Span{{-2, 4}, {3, 5}}, + NegativeBuckets: []float64{0, 0, 3.1, 3, 1.234e5, 1000, 3, 4, 0}, + }, + 0, + &FloatHistogram{ + PositiveSpans: []Span{{-2, 2}, {5, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + NegativeSpans: []Span{{0, 2}, {3, 4}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 3, 4}, + }, + }, + { + "cut empty buckets at start or end of spans, even in the middle", + &FloatHistogram{ + PositiveSpans: []Span{{-4, 6}, {3, 6}}, + PositiveBuckets: []float64{0, 0, 1, 3.3, 0, 0, 4.2, 0.1, 3.3, 0, 0, 0}, + NegativeSpans: []Span{{0, 2}, {2, 6}}, + NegativeBuckets: []float64{3.1, 3, 0, 1.234e5, 1000, 3, 4, 0}, + }, + 0, + &FloatHistogram{ + PositiveSpans: []Span{{-2, 2}, {5, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + NegativeSpans: []Span{{0, 2}, {3, 4}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 3, 4}, + }, + }, + { + "cut empty buckets at start or end but merge spans due to maxEmptyBuckets", + &FloatHistogram{ + PositiveSpans: []Span{{-4, 4}, {5, 3}}, + PositiveBuckets: []float64{0, 0, 1, 3.3, 4.2, 0.1, 3.3}, + NegativeSpans: []Span{{0, 2}, {3, 5}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 3, 4, 0}, + }, + 10, + &FloatHistogram{ + PositiveSpans: []Span{{-2, 10}}, + PositiveBuckets: []float64{1, 3.3, 0, 0, 0, 0, 0, 4.2, 0.1, 3.3}, + NegativeSpans: []Span{{0, 9}}, + NegativeBuckets: []float64{3.1, 3, 0, 0, 0, 1.234e5, 1000, 3, 4}, + }, + }, + { + "cut empty buckets from the middle of a span", + &FloatHistogram{ + PositiveSpans: []Span{{-4, 6}, {3, 3}}, + PositiveBuckets: []float64{0, 0, 1, 0, 0, 3.3, 4.2, 0.1, 3.3}, + NegativeSpans: []Span{{0, 2}, {3, 5}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 0, 3, 4}, + }, + 0, + &FloatHistogram{ + PositiveSpans: []Span{{-2, 1}, {2, 1}, {3, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + NegativeSpans: []Span{{0, 2}, {3, 2}, {1, 2}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 3, 4}, + }, + }, + { + "cut out a span containing only empty buckets", + &FloatHistogram{ + PositiveSpans: []Span{{-4, 3}, {2, 2}, {3, 4}}, + PositiveBuckets: []float64{0, 0, 1, 0, 0, 3.3, 4.2, 0.1, 3.3}, + }, + 0, + &FloatHistogram{ + PositiveSpans: []Span{{-2, 1}, {7, 4}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + }, + }, + { + "cut empty buckets from the middle of a span, avoiding some due to maxEmptyBuckets", + &FloatHistogram{ + PositiveSpans: []Span{{-4, 6}, {3, 3}}, + PositiveBuckets: []float64{0, 0, 1, 0, 0, 3.3, 4.2, 0.1, 3.3}, + NegativeSpans: []Span{{0, 2}, {3, 5}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 0, 3, 4}, + }, + 1, + &FloatHistogram{ + PositiveSpans: []Span{{-2, 1}, {2, 1}, {3, 3}}, + PositiveBuckets: []float64{1, 3.3, 4.2, 0.1, 3.3}, + NegativeSpans: []Span{{0, 2}, {3, 5}}, + NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 0, 3, 4}, + }, + }, + { + "avoiding all cutting of empty buckets from the middle of a chunk due to maxEmptyBuckets", + &FloatHistogram{ + PositiveSpans: []Span{{-4, 
6}, {3, 3}},
+				PositiveBuckets: []float64{0, 0, 1, 0, 0, 3.3, 4.2, 0.1, 3.3},
+				NegativeSpans:   []Span{{0, 2}, {3, 5}},
+				NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 0, 3, 4},
+			},
+			2,
+			&FloatHistogram{
+				PositiveSpans:   []Span{{-2, 4}, {3, 3}},
+				PositiveBuckets: []float64{1, 0, 0, 3.3, 4.2, 0.1, 3.3},
+				NegativeSpans:   []Span{{0, 2}, {3, 5}},
+				NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 0, 3, 4},
+			},
+		},
+		{
+			"everything merged into one span due to maxEmptyBuckets",
+			&FloatHistogram{
+				PositiveSpans:   []Span{{-4, 6}, {3, 3}},
+				PositiveBuckets: []float64{0, 0, 1, 0, 0, 3.3, 4.2, 0.1, 3.3},
+				NegativeSpans:   []Span{{0, 2}, {3, 5}},
+				NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000, 0, 3, 4},
+			},
+			3,
+			&FloatHistogram{
+				PositiveSpans:   []Span{{-2, 10}},
+				PositiveBuckets: []float64{1, 0, 0, 3.3, 0, 0, 0, 4.2, 0.1, 3.3},
+				NegativeSpans:   []Span{{0, 10}},
+				NegativeBuckets: []float64{3.1, 3, 0, 0, 0, 1.234e5, 1000, 0, 3, 4},
+			},
+		},
+		{
+			"only empty buckets and maxEmptyBuckets greater than zero",
+			&FloatHistogram{
+				PositiveSpans:   []Span{{-4, 6}, {3, 3}},
+				PositiveBuckets: []float64{0, 0, 0, 0, 0, 0, 0, 0, 0},
+				NegativeSpans:   []Span{{0, 7}},
+				NegativeBuckets: []float64{0, 0, 0, 0, 0, 0, 0},
+			},
+			3,
+			&FloatHistogram{
+				PositiveSpans:   []Span{},
+				PositiveBuckets: []float64{},
+				NegativeSpans:   []Span{},
+				NegativeBuckets: []float64{},
+			},
+		},
+		{
+			"multiple spans of only empty buckets",
+			&FloatHistogram{
+				PositiveSpans:   []Span{{-10, 2}, {2, 1}, {3, 3}},
+				PositiveBuckets: []float64{0, 0, 0, 0, 2, 3},
+				NegativeSpans:   []Span{{-10, 2}, {2, 1}, {3, 3}},
+				NegativeBuckets: []float64{2, 3, 0, 0, 0, 0},
+			},
+			0,
+			&FloatHistogram{
+				PositiveSpans:   []Span{{-1, 2}},
+				PositiveBuckets: []float64{2, 3},
+				NegativeSpans:   []Span{{-10, 2}},
+				NegativeBuckets: []float64{2, 3},
+			},
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			require.Equal(t, c.expected, c.in.Compact(c.maxEmptyBuckets))
+			// Compact has happened in-place, too.
+ require.Equal(t, c.expected, c.in) + }) + } +} + +func TestFloatHistogramAdd(t *testing.T) { + cases := []struct { + name string + in1, in2, expected *FloatHistogram + }{ + { + "same bucket layout", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 21, + Sum: 1.234, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 19, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 5, 7, 13}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{4, 2, 9, 10}, + }, + }, + { + "same bucket layout, defined differently", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + PositiveSpans: []Span{{-2, 2}, {1, 1}, {0, 2}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 21, + Sum: 1.234, + PositiveSpans: []Span{{-2, 2}, {1, 2}, {0, 1}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + NegativeSpans: []Span{{3, 7}}, + NegativeBuckets: []float64{1, 1, 0, 0, 0, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 19, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{-2, 2}, {1, 1}, {0, 2}}, + PositiveBuckets: []float64{1, 0, 5, 7, 13}, + NegativeSpans: []Span{{3, 5}, {0, 2}}, + NegativeBuckets: []float64{4, 2, 0, 0, 0, 9, 10}, + }, + }, + { + "non-overlapping spans", + &FloatHistogram{ + ZeroThreshold: 0.001, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + PositiveSpans: []Span{{-2, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + &FloatHistogram{ + ZeroThreshold: 0.001, + ZeroCount: 8, + Count: 21, + Sum: 1.234, + PositiveSpans: []Span{{0, 2}, {3, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6}, + NegativeSpans: []Span{{-9, 2}, {3, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.001, + ZeroCount: 19, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{-2, 4}, {0, 6}}, + PositiveBuckets: []float64{1, 0, 5, 4, 3, 4, 7, 2, 3, 6}, + NegativeSpans: []Span{{-9, 2}, {3, 2}, {5, 2}, {3, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4, 3, 1, 5, 6}, + }, + }, + { + "non-overlapping inverted order", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 21, + Sum: 1.234, + PositiveSpans: []Span{{0, 2}, {3, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6}, + NegativeSpans: []Span{{-9, 2}, {3, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + PositiveSpans: []Span{{-2, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 19, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{-2, 2}, {0, 5}, {0, 3}}, + PositiveBuckets: []float64{1, 0, 5, 4, 3, 4, 7, 2, 3, 6}, + NegativeSpans: []Span{{-9, 2}, {3, 2}, {5, 2}, {3, 2}}, + 
NegativeBuckets: []float64{1, 1, 4, 4, 3, 1, 5, 6}, + }, + }, + { + "overlapping spans", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + PositiveSpans: []Span{{-2, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 21, + Sum: 1.234, + PositiveSpans: []Span{{-1, 4}, {0, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, + NegativeSpans: []Span{{4, 2}, {1, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 19, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{-2, 4}, {0, 4}}, + PositiveBuckets: []float64{1, 5, 4, 2, 6, 10, 9, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, + }, + }, + { + "overlapping spans inverted order", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 21, + Sum: 1.234, + PositiveSpans: []Span{{-1, 4}, {0, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, + NegativeSpans: []Span{{4, 2}, {1, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + PositiveSpans: []Span{{-2, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 19, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{-2, 5}, {0, 3}}, + PositiveBuckets: []float64{1, 5, 4, 2, 6, 10, 9, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, + }, + }, + { + "schema change", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 21, + Sum: 1.234, + Schema: 0, + PositiveSpans: []Span{{-1, 4}, {0, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, + NegativeSpans: []Span{{4, 2}, {1, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + Schema: 1, + PositiveSpans: []Span{{-4, 3}, {5, 5}}, + PositiveBuckets: []float64{1, 0, 0, 3, 2, 2, 3, 4}, + NegativeSpans: []Span{{6, 3}, {6, 4}}, + NegativeBuckets: []float64{3, 0.5, 0.5, 2, 3, 2, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 19, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{-2, 5}, {0, 3}}, + PositiveBuckets: []float64{1, 5, 4, 2, 6, 10, 9, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, + }, + }, + { + "larger zero bucket in first histogram", + &FloatHistogram{ + ZeroThreshold: 1, + ZeroCount: 17, + Count: 21, + Sum: 1.234, + PositiveSpans: []Span{{1, 2}, {0, 3}}, + PositiveBuckets: []float64{2, 3, 6, 2, 5}, + NegativeSpans: []Span{{4, 2}, {1, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + PositiveSpans: []Span{{-2, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + &FloatHistogram{ + ZeroThreshold: 1, + ZeroCount: 29, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{1, 2}, {0, 3}}, + PositiveBuckets: []float64{2, 6, 10, 9, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, + }, + }, + { + "larger zero bucket in second histogram", + &FloatHistogram{ + ZeroThreshold: 0.01, + 
ZeroCount: 11, + Count: 30, + Sum: 2.345, + PositiveSpans: []Span{{-2, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + &FloatHistogram{ + ZeroThreshold: 1, + ZeroCount: 17, + Count: 21, + Sum: 1.234, + PositiveSpans: []Span{{1, 2}, {0, 3}}, + PositiveBuckets: []float64{2, 3, 6, 2, 5}, + NegativeSpans: []Span{{4, 2}, {1, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 1, + ZeroCount: 29, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{1, 5}}, + PositiveBuckets: []float64{2, 6, 10, 9, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, + }, + }, + { + "larger zero threshold in first histogram ends up inside a populated bucket of second histogram", + &FloatHistogram{ + ZeroThreshold: 0.2, + ZeroCount: 17, + Count: 21, + Sum: 1.234, + PositiveSpans: []Span{{1, 2}, {0, 3}}, + PositiveBuckets: []float64{2, 3, 6, 2, 5}, + NegativeSpans: []Span{{4, 2}, {1, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + PositiveSpans: []Span{{-2, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + &FloatHistogram{ + ZeroThreshold: 0.25, + ZeroCount: 29, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{-1, 1}, {1, 5}}, + PositiveBuckets: []float64{0, 2, 6, 10, 9, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, + }, + }, + { + "larger zero threshold in second histogram ends up inside a populated bucket of first histogram", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + PositiveSpans: []Span{{-2, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + &FloatHistogram{ + ZeroThreshold: 0.2, + ZeroCount: 17, + Count: 21, + Sum: 1.234, + PositiveSpans: []Span{{1, 2}, {0, 3}}, + PositiveBuckets: []float64{2, 3, 6, 2, 5}, + NegativeSpans: []Span{{4, 2}, {1, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.25, + ZeroCount: 29, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{1, 5}}, + PositiveBuckets: []float64{2, 6, 10, 9, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, + }, + }, + { + "schema change combined with larger zero bucket in second histogram", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 21, + Sum: 1.234, + Schema: 0, + PositiveSpans: []Span{{-2, 5}, {0, 3}}, + PositiveBuckets: []float64{2, 5, 4, 2, 3, 6, 2, 5}, + NegativeSpans: []Span{{4, 2}, {1, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.25, + ZeroCount: 12, + Count: 30, + Sum: 2.345, + Schema: 1, + PositiveSpans: []Span{{-3, 2}, {5, 5}}, + PositiveBuckets: []float64{1, 0, 3, 2, 2, 3, 4}, + NegativeSpans: []Span{{6, 3}, {6, 4}}, + NegativeBuckets: []float64{3, 0.5, 0.5, 2, 3, 2, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.25, + ZeroCount: 22, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{-1, 7}}, + PositiveBuckets: []float64{6, 4, 2, 6, 10, 9, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, + }, + }, + { + "schema change combined with larger zero bucket in first histogram", + &FloatHistogram{ + 
ZeroThreshold: 0.25, + ZeroCount: 8, + Count: 21, + Sum: 1.234, + Schema: 0, + PositiveSpans: []Span{{-1, 4}, {0, 3}}, + PositiveBuckets: []float64{5, 4, 2, 3, 6, 2, 5}, + NegativeSpans: []Span{{4, 2}, {1, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + Schema: 1, + PositiveSpans: []Span{{-4, 3}, {5, 5}}, + PositiveBuckets: []float64{1, 0, 0, 3, 2, 2, 3, 4}, + NegativeSpans: []Span{{6, 3}, {6, 4}}, + NegativeBuckets: []float64{3, 0.5, 0.5, 2, 3, 2, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.25, + ZeroCount: 20, + Count: 51, + Sum: 3.579, + PositiveSpans: []Span{{-1, 4}, {0, 3}}, + PositiveBuckets: []float64{5, 4, 2, 6, 10, 9, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{3, 2, 1, 4, 9, 6}, + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + require.Equal(t, c.expected, c.in1.Add(c.in2)) + // Has it also happened in-place? + require.Equal(t, c.expected, c.in1) + }) + } +} + +func TestFloatHistogramSub(t *testing.T) { + // This has fewer test cases than TestFloatHistogramAdd because Add and + // Sub share most of the trickier code. + cases := []struct { + name string + in1, in2, expected *FloatHistogram + }{ + { + "same bucket layout", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 23, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 21, + Sum: 12, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{0, 0, 2, 3, 6}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{1, 1, 4, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 3, + Count: 9, + Sum: 11, + PositiveSpans: []Span{{-2, 2}, {1, 3}}, + PositiveBuckets: []float64{1, 0, 1, 1, 1}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{2, 0, 1, 2}, + }, + }, + { + "schema change", + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 8, + Count: 59, + Sum: 1.234, + Schema: 0, + PositiveSpans: []Span{{-2, 5}, {0, 3}}, + PositiveBuckets: []float64{2, 5, 4, 2, 3, 6, 7, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{4, 10, 1, 4, 14, 7}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 2, + Count: 19, + Sum: 0.345, + Schema: 1, + PositiveSpans: []Span{{-4, 3}, {5, 5}}, + PositiveBuckets: []float64{1, 0, 0, 1, 2, 2, 3, 4}, + NegativeSpans: []Span{{6, 3}, {6, 4}}, + NegativeBuckets: []float64{3, 0.5, 0.5, 2, 3, 2, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 6, + Count: 40, + Sum: 0.889, + PositiveSpans: []Span{{-2, 5}, {0, 3}}, + PositiveBuckets: []float64{1, 5, 4, 2, 2, 2, 0, 5}, + NegativeSpans: []Span{{3, 3}, {1, 3}}, + NegativeBuckets: []float64{1, 9, 1, 4, 9, 1}, + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + require.Equal(t, c.expected, c.in1.Sub(c.in2)) + // Has it also happened in-place? 
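+            // (Like Add, Sub modifies its receiver in place and returns it, so the
+            // two assertions together cover both the return value and the receiver.)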
+ require.Equal(t, c.expected, c.in1) + }) + } +} + +func TestFloatHistogramCopyToSchema(t *testing.T) { + cases := []struct { + name string + targetSchema int32 + in, expected *FloatHistogram + }{ + { + "no schema change", + 1, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + Schema: 1, + PositiveSpans: []Span{{-4, 3}, {5, 5}}, + PositiveBuckets: []float64{1, 0, 0, 3, 2, 2, 3, 4}, + NegativeSpans: []Span{{6, 3}, {6, 4}}, + NegativeBuckets: []float64{3, 0.5, 0.5, 2, 3, 2, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + Schema: 1, + PositiveSpans: []Span{{-4, 3}, {5, 5}}, + PositiveBuckets: []float64{1, 0, 0, 3, 2, 2, 3, 4}, + NegativeSpans: []Span{{6, 3}, {6, 4}}, + NegativeBuckets: []float64{3, 0.5, 0.5, 2, 3, 2, 4}, + }, + }, + { + "schema change", + 0, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + Schema: 1, + PositiveSpans: []Span{{-4, 3}, {5, 5}}, + PositiveBuckets: []float64{1, 0, 0, 3, 2, 2, 3, 4}, + NegativeSpans: []Span{{6, 3}, {6, 4}}, + NegativeBuckets: []float64{3, 0.5, 0.5, 2, 3, 2, 4}, + }, + &FloatHistogram{ + ZeroThreshold: 0.01, + ZeroCount: 11, + Count: 30, + Sum: 2.345, + Schema: 0, + PositiveSpans: []Span{{-2, 2}, {2, 3}}, + PositiveBuckets: []float64{1, 0, 3, 4, 7}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []float64{3, 1, 5, 6}, + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + require.Equal(t, c.expected, c.in.CopyToSchema(c.targetSchema)) + }) + } +} + +func TestReverseFloatBucketIterator(t *testing.T) { + h := &FloatHistogram{ + Count: 405, + ZeroCount: 102, + ZeroThreshold: 0.001, + Sum: 1008.4, + Schema: 1, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 3}, + {Offset: 3, Length: 0}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33}, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 0}, + {Offset: 3, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33}, + } + + // Assuming that the regular iterator is correct. + + // Positive buckets. + var expBuckets, actBuckets []Bucket[float64] + it := h.PositiveBucketIterator() + for it.Next() { + // Append in reverse to check reversed list. + expBuckets = append([]Bucket[float64]{it.At()}, expBuckets...) + } + it = h.PositiveReverseBucketIterator() + for it.Next() { + actBuckets = append(actBuckets, it.At()) + } + require.Greater(t, len(expBuckets), 0) + require.Greater(t, len(actBuckets), 0) + require.Equal(t, expBuckets, actBuckets) + + // Negative buckets. + expBuckets = expBuckets[:0] + actBuckets = actBuckets[:0] + it = h.NegativeBucketIterator() + for it.Next() { + // Append in reverse to check reversed list. + expBuckets = append([]Bucket[float64]{it.At()}, expBuckets...) + } + it = h.NegativeReverseBucketIterator() + for it.Next() { + actBuckets = append(actBuckets, it.At()) + } + require.Greater(t, len(expBuckets), 0) + require.Greater(t, len(actBuckets), 0) + require.Equal(t, expBuckets, actBuckets) +} + +func TestAllFloatBucketIterator(t *testing.T) { + cases := []struct { + h FloatHistogram + // To determine the expected buckets. 
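+        // Each flag indicates which of the three bucket groups (negative, zero,
+        // positive) the case's histogram is expected to contribute; the expected
+        // buckets are assembled from them in the test body.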
+ includeNeg, includeZero, includePos bool + }{ + { + h: FloatHistogram{ + Count: 405, + ZeroCount: 102, + ZeroThreshold: 0.001, + Sum: 1008.4, + Schema: 1, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 3}, + {Offset: 3, Length: 0}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33}, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 0}, + {Offset: 3, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33}, + }, + includeNeg: true, + includeZero: true, + includePos: true, + }, + { + h: FloatHistogram{ + Count: 405, + ZeroCount: 102, + ZeroThreshold: 0.001, + Sum: 1008.4, + Schema: 1, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 0}, + {Offset: 3, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33}, + }, + includeNeg: true, + includeZero: true, + includePos: false, + }, + { + h: FloatHistogram{ + Count: 405, + ZeroCount: 102, + ZeroThreshold: 0.001, + Sum: 1008.4, + Schema: 1, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 3}, + {Offset: 3, Length: 0}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33}, + }, + includeNeg: false, + includeZero: true, + includePos: true, + }, + { + h: FloatHistogram{ + Count: 405, + ZeroCount: 102, + ZeroThreshold: 0.001, + Sum: 1008.4, + Schema: 1, + }, + includeNeg: false, + includeZero: true, + includePos: false, + }, + { + h: FloatHistogram{ + Count: 405, + ZeroCount: 0, + ZeroThreshold: 0.001, + Sum: 1008.4, + Schema: 1, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 3}, + {Offset: 3, Length: 0}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33}, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 0}, + {Offset: 3, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33}, + }, + includeNeg: true, + includeZero: false, + includePos: true, + }, + { + h: FloatHistogram{ + Count: 447, + ZeroCount: 42, + ZeroThreshold: 0.5, // Coinciding with bucket boundary. + Sum: 1008.4, + Schema: 0, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 3}, + {Offset: 3, Length: 0}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33}, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 0}, + {Offset: 3, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33}, + }, + includeNeg: true, + includeZero: true, + includePos: true, + }, + { + h: FloatHistogram{ + Count: 447, + ZeroCount: 42, + ZeroThreshold: 0.6, // Within the bucket closest to zero. 
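+                // (At schema 0 the bucket next to zero is (0.5,1], so a threshold of 0.6
+                // falls inside the innermost positive and negative buckets; the expected
+                // buckets below therefore clip those bounds to ±0.6.)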
+ Sum: 1008.4, + Schema: 0, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 3}, + {Offset: 3, Length: 0}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33}, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 0}, + {Offset: 3, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33}, + }, + includeNeg: true, + includeZero: true, + includePos: true, + }, + } + + for i, c := range cases { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + var expBuckets, actBuckets []Bucket[float64] + + if c.includeNeg { + it := c.h.NegativeReverseBucketIterator() + for it.Next() { + b := it.At() + if c.includeZero && b.Upper > -c.h.ZeroThreshold { + b.Upper = -c.h.ZeroThreshold + } + expBuckets = append(expBuckets, b) + } + } + if c.includeZero { + expBuckets = append(expBuckets, Bucket[float64]{ + Lower: -c.h.ZeroThreshold, + Upper: c.h.ZeroThreshold, + LowerInclusive: true, + UpperInclusive: true, + Count: c.h.ZeroCount, + }) + } + if c.includePos { + it := c.h.PositiveBucketIterator() + for it.Next() { + b := it.At() + if c.includeZero && b.Lower < c.h.ZeroThreshold { + b.Lower = c.h.ZeroThreshold + } + expBuckets = append(expBuckets, b) + } + } + + it := c.h.AllBucketIterator() + for it.Next() { + actBuckets = append(actBuckets, it.At()) + } + + require.Equal(t, expBuckets, actBuckets) + }) + } +} + +func TestAllReverseFloatBucketIterator(t *testing.T) { + cases := []struct { + h FloatHistogram + // To determine the expected buckets. + includeNeg, includeZero, includePos bool + }{ + { + h: FloatHistogram{ + Count: 405, + ZeroCount: 102, + ZeroThreshold: 0.001, + Sum: 1008.4, + Schema: 1, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 3}, + {Offset: 3, Length: 0}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33}, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 0}, + {Offset: 3, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33}, + }, + includeNeg: true, + includeZero: true, + includePos: true, + }, + { + h: FloatHistogram{ + Count: 405, + ZeroCount: 102, + ZeroThreshold: 0.001, + Sum: 1008.4, + Schema: 1, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 0}, + {Offset: 3, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33}, + }, + includeNeg: true, + includeZero: true, + includePos: false, + }, + { + h: FloatHistogram{ + Count: 405, + ZeroCount: 102, + ZeroThreshold: 0.001, + Sum: 1008.4, + Schema: 1, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 3}, + {Offset: 3, Length: 0}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33}, + }, + includeNeg: false, + includeZero: true, + includePos: true, + }, + { + h: FloatHistogram{ + Count: 405, + ZeroCount: 102, + ZeroThreshold: 0.001, + Sum: 1008.4, + Schema: 1, + }, + includeNeg: false, + includeZero: true, + includePos: false, + }, + { + h: FloatHistogram{ + Count: 405, + 
ZeroCount: 0, + ZeroThreshold: 0.001, + Sum: 1008.4, + Schema: 1, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 3}, + {Offset: 3, Length: 0}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33}, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 0}, + {Offset: 3, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33}, + }, + includeNeg: true, + includeZero: false, + includePos: true, + }, + { + h: FloatHistogram{ + Count: 447, + ZeroCount: 42, + ZeroThreshold: 0.5, // Coinciding with bucket boundary. + Sum: 1008.4, + Schema: 0, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 3}, + {Offset: 3, Length: 0}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33}, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 0}, + {Offset: 3, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33}, + }, + includeNeg: true, + includeZero: true, + includePos: true, + }, + { + h: FloatHistogram{ + Count: 447, + ZeroCount: 42, + ZeroThreshold: 0.6, // Within the bucket closest to zero. + Sum: 1008.4, + Schema: 0, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 3}, + {Offset: 3, Length: 0}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + PositiveBuckets: []float64{100, 344, 123, 55, 3, 63, 2, 54, 235, 33}, + NegativeSpans: []Span{ + {Offset: 0, Length: 3}, + {Offset: 1, Length: 0}, + {Offset: 3, Length: 0}, + {Offset: 3, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 5, Length: 3}, + }, + NegativeBuckets: []float64{10, 34, 1230, 54, 67, 63, 2, 554, 235, 33}, + }, + includeNeg: true, + includeZero: true, + includePos: true, + }, + } + + for i, c := range cases { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + var expBuckets, actBuckets []Bucket[float64] + + if c.includePos { + it := c.h.PositiveReverseBucketIterator() + for it.Next() { + b := it.At() + if c.includeZero && b.Lower < c.h.ZeroThreshold { + b.Lower = c.h.ZeroThreshold + } + expBuckets = append(expBuckets, b) + } + } + if c.includeZero { + expBuckets = append(expBuckets, Bucket[float64]{ + Lower: -c.h.ZeroThreshold, + Upper: c.h.ZeroThreshold, + LowerInclusive: true, + UpperInclusive: true, + Count: c.h.ZeroCount, + }) + } + if c.includeNeg { + it := c.h.NegativeBucketIterator() + for it.Next() { + b := it.At() + if c.includeZero && b.Upper > -c.h.ZeroThreshold { + b.Upper = -c.h.ZeroThreshold + } + expBuckets = append(expBuckets, b) + } + } + + it := c.h.AllReverseBucketIterator() + for it.Next() { + actBuckets = append(actBuckets, it.At()) + } + + require.Equal(t, expBuckets, actBuckets) + }) + } +} diff --git a/model/histogram/generic.go b/model/histogram/generic.go new file mode 100644 index 0000000000..e1de5ffb52 --- /dev/null +++ b/model/histogram/generic.go @@ -0,0 +1,548 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package histogram
+
+import (
+    "fmt"
+    "math"
+    "strings"
+)
+
+// BucketCount is a type constraint for the count in a bucket, which can be
+// float64 (for type FloatHistogram) or uint64 (for type Histogram).
+type BucketCount interface {
+    float64 | uint64
+}
+
+// InternalBucketCount is used internally by Histogram and FloatHistogram. The
+// difference from BucketCount above is that Histogram internally uses deltas
+// between buckets rather than absolute counts (while FloatHistogram uses
+// absolute counts directly). Go type parameters don't allow type
+// specialization. Therefore, where special treatment of deltas between buckets
+// vs. absolute counts is important, this information has to be provided as a
+// separate boolean parameter "deltaBuckets".
+type InternalBucketCount interface {
+    float64 | int64
+}
+
+// Bucket represents a bucket with lower and upper limit and the absolute count
+// of samples in the bucket. It also specifies whether each limit is inclusive
+// or not. (Mathematically, inclusive limits create a closed interval, and
+// non-inclusive limits an open interval.)
+//
+// To represent cumulative buckets, Lower is set to -Inf, and the Count is then
+// cumulative (including the counts of all buckets for smaller values).
+type Bucket[BC BucketCount] struct {
+    Lower, Upper                   float64
+    LowerInclusive, UpperInclusive bool
+    Count                          BC
+
+    // Index within schema. To easily compare buckets that share the same
+    // schema and sign (positive or negative). Irrelevant for the zero bucket.
+    Index int32
+}
+
+// String returns a string representation of a Bucket, using the usual
+// mathematical notation of '['/']' for inclusive bounds and '('/')' for
+// non-inclusive bounds.
+func (b Bucket[BC]) String() string {
+    var sb strings.Builder
+    if b.LowerInclusive {
+        sb.WriteRune('[')
+    } else {
+        sb.WriteRune('(')
+    }
+    fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper)
+    if b.UpperInclusive {
+        sb.WriteRune(']')
+    } else {
+        sb.WriteRune(')')
+    }
+    fmt.Fprintf(&sb, ":%v", b.Count)
+    return sb.String()
+}
+
+// BucketIterator iterates over the buckets of a Histogram, returning decoded
+// buckets.
+type BucketIterator[BC BucketCount] interface {
+    // Next advances the iterator by one.
+    Next() bool
+    // At returns the current bucket.
+    At() Bucket[BC]
+}
+
+// baseBucketIterator provides a struct that is shared by most BucketIterator
+// implementations, together with an implementation of the At method. This
+// iterator can be embedded in full implementations of BucketIterator to save
+// on code duplication.
+type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct {
+    schema  int32
+    spans   []Span
+    buckets []IBC
+
+    positive bool // Whether this is for positive buckets.
+
+    spansIdx   int    // Current span within spans slice.
+    idxInSpan  uint32 // Index in the current span. 0 <= idxInSpan < span.Length.
+    bucketsIdx int    // Current bucket within buckets slice.
+
+    currCount IBC   // Count in the current bucket.
+    currIdx   int32 // The actual bucket index.
+} + +func (b baseBucketIterator[BC, IBC]) At() Bucket[BC] { + bucket := Bucket[BC]{ + Count: BC(b.currCount), + Index: b.currIdx, + } + if b.positive { + bucket.Upper = getBound(b.currIdx, b.schema) + bucket.Lower = getBound(b.currIdx-1, b.schema) + } else { + bucket.Lower = -getBound(b.currIdx, b.schema) + bucket.Upper = -getBound(b.currIdx-1, b.schema) + } + bucket.LowerInclusive = bucket.Lower < 0 + bucket.UpperInclusive = bucket.Upper > 0 + return bucket +} + +// compactBuckets is a generic function used by both Histogram.Compact and +// FloatHistogram.Compact. Set deltaBuckets to true if the provided buckets are +// deltas. Set it to false if the buckets contain absolute counts. +func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmptyBuckets int, deltaBuckets bool) ([]IBC, []Span) { + // Fast path: If there are no empty buckets AND no offset in any span is + // <= maxEmptyBuckets AND no span has length 0, there is nothing to do and we can return + // immediately. We check that first because it's cheap and presumably + // common. + nothingToDo := true + var currentBucketAbsolute IBC + for _, bucket := range buckets { + if deltaBuckets { + currentBucketAbsolute += bucket + } else { + currentBucketAbsolute = bucket + } + if currentBucketAbsolute == 0 { + nothingToDo = false + break + } + } + if nothingToDo { + for _, span := range spans { + if int(span.Offset) <= maxEmptyBuckets || span.Length == 0 { + nothingToDo = false + break + } + } + if nothingToDo { + return buckets, spans + } + } + + var iBucket, iSpan int + var posInSpan uint32 + currentBucketAbsolute = 0 + + // Helper function. + emptyBucketsHere := func() int { + i := 0 + abs := currentBucketAbsolute + for uint32(i)+posInSpan < spans[iSpan].Length && abs == 0 { + i++ + if i+iBucket >= len(buckets) { + break + } + abs = buckets[i+iBucket] + } + return i + } + + // Merge spans with zero-offset to avoid special cases later. + if len(spans) > 1 { + for i, span := range spans[1:] { + if span.Offset == 0 { + spans[iSpan].Length += span.Length + continue + } + iSpan++ + if i+1 != iSpan { + spans[iSpan] = span + } + } + spans = spans[:iSpan+1] + iSpan = 0 + } + + // Merge spans with zero-length to avoid special cases later. + for i, span := range spans { + if span.Length == 0 { + if i+1 < len(spans) { + spans[i+1].Offset += span.Offset + } + continue + } + if i != iSpan { + spans[iSpan] = span + } + iSpan++ + } + spans = spans[:iSpan] + iSpan = 0 + + // Cut out empty buckets from start and end of spans, no matter + // what. Also cut out empty buckets from the middle of a span but only + // if there are more than maxEmptyBuckets consecutive empty buckets. + for iBucket < len(buckets) { + if deltaBuckets { + currentBucketAbsolute += buckets[iBucket] + } else { + currentBucketAbsolute = buckets[iBucket] + } + if nEmpty := emptyBucketsHere(); nEmpty > 0 { + if posInSpan > 0 && + nEmpty < int(spans[iSpan].Length-posInSpan) && + nEmpty <= maxEmptyBuckets { + // The empty buckets are in the middle of a + // span, and there are few enough to not bother. + // Just fast-forward. + iBucket += nEmpty + if deltaBuckets { + currentBucketAbsolute = 0 + } + posInSpan += uint32(nEmpty) + continue + } + // In all other cases, we cut out the empty buckets. + if deltaBuckets && iBucket+nEmpty < len(buckets) { + currentBucketAbsolute = -buckets[iBucket] + buckets[iBucket+nEmpty] += buckets[iBucket] + } + buckets = append(buckets[:iBucket], buckets[iBucket+nEmpty:]...) + if posInSpan == 0 { + // Start of span. 
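+                // Either the whole span is empty and is removed entirely below,
+                // or only its first nEmpty buckets are cut off and the span is
+                // trimmed accordingly.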
+                if nEmpty == int(spans[iSpan].Length) {
+                    // The whole span is empty.
+                    offset := spans[iSpan].Offset
+                    spans = append(spans[:iSpan], spans[iSpan+1:]...)
+                    if len(spans) > iSpan {
+                        spans[iSpan].Offset += offset + int32(nEmpty)
+                    }
+                    continue
+                }
+                spans[iSpan].Length -= uint32(nEmpty)
+                spans[iSpan].Offset += int32(nEmpty)
+                continue
+            }
+            // It's in the middle or at the end of the span.
+            // Split the current span.
+            newSpan := Span{
+                Offset: int32(nEmpty),
+                Length: spans[iSpan].Length - posInSpan - uint32(nEmpty),
+            }
+            spans[iSpan].Length = posInSpan
+            // In any case, we have to continue with the next span.
+            iSpan++
+            posInSpan = 0
+            if newSpan.Length == 0 {
+                // The span is empty, so we were already at the end of a span.
+                // We don't have to insert the new span, just adjust the next
+                // span's offset, if there is one.
+                if iSpan < len(spans) {
+                    spans[iSpan].Offset += int32(nEmpty)
+                }
+                continue
+            }
+            // Insert the new span.
+            spans = append(spans, Span{})
+            if iSpan+1 < len(spans) {
+                copy(spans[iSpan+1:], spans[iSpan:])
+            }
+            spans[iSpan] = newSpan
+            continue
+        }
+        iBucket++
+        posInSpan++
+        if posInSpan >= spans[iSpan].Length {
+            posInSpan = 0
+            iSpan++
+        }
+    }
+    if maxEmptyBuckets == 0 || len(buckets) == 0 {
+        return buckets, spans
+    }
+
+    // Finally, check if any offsets between spans are small enough to merge
+    // the spans.
+    iBucket = int(spans[0].Length)
+    if deltaBuckets {
+        currentBucketAbsolute = 0
+        for _, bucket := range buckets[:iBucket] {
+            currentBucketAbsolute += bucket
+        }
+    }
+    iSpan = 1
+    for iSpan < len(spans) {
+        if int(spans[iSpan].Offset) > maxEmptyBuckets {
+            l := int(spans[iSpan].Length)
+            if deltaBuckets {
+                for _, bucket := range buckets[iBucket : iBucket+l] {
+                    currentBucketAbsolute += bucket
+                }
+            }
+            iBucket += l
+            iSpan++
+            continue
+        }
+        // Merge span with previous one and insert empty buckets.
+        offset := int(spans[iSpan].Offset)
+        spans[iSpan-1].Length += uint32(offset) + spans[iSpan].Length
+        spans = append(spans[:iSpan], spans[iSpan+1:]...)
+        newBuckets := make([]IBC, len(buckets)+offset)
+        copy(newBuckets, buckets[:iBucket])
+        copy(newBuckets[iBucket+offset:], buckets[iBucket:])
+        if deltaBuckets {
+            newBuckets[iBucket] = -currentBucketAbsolute
+            newBuckets[iBucket+offset] += currentBucketAbsolute
+        }
+        iBucket += offset
+        buckets = newBuckets
+        currentBucketAbsolute = buckets[iBucket]
+        // Note that with many merges, it would be more efficient to
+        // first record all the chunks of empty buckets to insert and
+        // then do it in one go through all the buckets.
+    }
+
+    return buckets, spans
+}
+
+func bucketsMatch[IBC InternalBucketCount](b1, b2 []IBC) bool {
+    if len(b1) != len(b2) {
+        return false
+    }
+    for i, b := range b1 {
+        if b != b2[i] {
+            return false
+        }
+    }
+    return true
+}
+
+func getBound(idx, schema int32) float64 {
+    // Here is a bit of context about the behavior for the last bucket
+    // counting regular numbers (called simply "last bucket" below) and the
+    // bucket counting observations of ±Inf (called "inf bucket" below, with
+    // an idx one higher than that of the "last bucket"):
+    //
+    // If we apply the usual formula to the last bucket, its upper bound
+    // would be calculated as +Inf. The reason is that the max possible
+    // regular float64 number (math.MaxFloat64) doesn't coincide with one of
+    // the calculated bucket boundaries. So the calculated boundary has to
+    // be larger than math.MaxFloat64, and the only float64 larger than
+    // math.MaxFloat64 is +Inf. However, we want to count actual
+    // observations of ±Inf in the inf bucket. Therefore, we have to treat
+    // the upper bound of the last bucket specially and set it to
+    // math.MaxFloat64. (The upper bound of the inf bucket, with its idx
+    // being one higher than that of the last bucket, naturally comes out as
+    // +Inf by the usual formula. So that's fine.)
+    //
+    // math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
+    // 1024. If there were a float64 number following math.MaxFloat64, it
+    // would have a frac of 1.0 and an exp of 1024, or equivalently a frac
+    // of 0.5 and an exp of 1025. However, since frac must be smaller than
+    // 1, and exp must be smaller than 1025, either representation overflows
+    // a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
+    // largest possible float64. Q.E.D.) However, the formula for
+    // calculating the upper bound from the idx and schema of the last
+    // bucket results in precisely that. It is either frac=1.0 & exp=1024
+    // (for schema < 0) or frac=0.5 & exp=1025 (for schema >= 0). (This is,
+    // by the way, a power of two where the exponent itself is a power of
+    // two, 2¹⁰ in fact, which coincides with a bucket boundary in all
+    // schemas.) So these are the special cases we have to catch below.
+    if schema < 0 {
+        exp := int(idx) << -schema
+        if exp == 1024 {
+            // This is the last bucket before the overflow bucket
+            // (for ±Inf observations). Return math.MaxFloat64 as
+            // explained above.
+            return math.MaxFloat64
+        }
+        return math.Ldexp(1, exp)
+    }
+
+    fracIdx := idx & ((1 << schema) - 1)
+    frac := exponentialBounds[schema][fracIdx]
+    exp := (int(idx) >> schema) + 1
+    if frac == 0.5 && exp == 1025 {
+        // This is the last bucket before the overflow bucket (for ±Inf
+        // observations). Return math.MaxFloat64 as explained above.
+        return math.MaxFloat64
+    }
+    return math.Ldexp(frac, exp)
+}
+
+// exponentialBounds is a precalculated table of bucket bounds in the interval
+// [0.5,1) for schemas 0 to 8.
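+// As a worked example of how getBound uses this table: for idx 1 at
+// schema 2, fracIdx = 1 & 3 = 1, so frac = exponentialBounds[2][1] =
+// 0.5946..., and exp = (1 >> 2) + 1 = 1, yielding the bound
+// math.Ldexp(0.5946..., 1) = 1.189207115002721, the value also asserted
+// in TestGetBound.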
+var exponentialBounds = [][]float64{ + // Schema "0": + {0.5}, + // Schema 1: + {0.5, 0.7071067811865475}, + // Schema 2: + {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}, + // Schema 3: + { + 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, + 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711, + }, + // Schema 4: + { + 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, + 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463, + 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627, + 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735, + }, + // Schema 5: + { + 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, + 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887, + 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666, + 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159, + 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112, + 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823, + 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533, + 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999, + }, + // Schema 6: + { + 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, + 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598, + 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209, + 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406, + 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349, + 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891, + 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515, + 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555, + 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234, + 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269, + 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334, + 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681, + 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529, + 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991, + 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827, + 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752, + }, + // Schema 7: + { + 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, + 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894, + 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309, + 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545, + 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393, + 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595, + 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754, + 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704, + 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907, + 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665, + 0.620928906036742, 0.6243004885946023, 
0.6276903785123455, 0.6310986751971253, + 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329, + 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032, + 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728, + 0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265, + 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076, + 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491, + 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908, + 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126, + 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777, + 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764, + 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465, + 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821, + 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981, + 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312, + 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842, + 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671, + 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263, + 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943, + 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368, + 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164, + 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328, + }, + // Schema 8: + { + 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088, + 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869, + 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205, + 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158, + 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313, + 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321, + 0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954, + 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847, + 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111, + 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088, + 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098, + 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026, + 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894, + 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493, + 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185, + 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968, + 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903, + 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005, + 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725, + 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082, + 0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581, + 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 
0.6328097572894031, + 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346, + 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447, + 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385, + 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788, + 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727, + 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171, + 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058, + 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119, + 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999, + 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352, + 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471, + 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126, + 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218, + 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837, + 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984, + 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031, + 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071, + 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282, + 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442, + 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707, + 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818, + 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853, + 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642, + 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003, + 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079, + 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391, + 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661, + 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629, + 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553, + 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389, + 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771, + 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002, + 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155, + 0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483, + 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253, + 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191, + 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693, + 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947, + 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133, + 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889, + 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168, + 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698, + }, +} diff --git a/model/histogram/generic_test.go b/model/histogram/generic_test.go new file mode 
100644 index 0000000000..55015c047f --- /dev/null +++ b/model/histogram/generic_test.go @@ -0,0 +1,112 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package histogram + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGetBound(t *testing.T) { + scenarios := []struct { + idx int32 + schema int32 + want float64 + }{ + { + idx: -1, + schema: -1, + want: 0.25, + }, + { + idx: 0, + schema: -1, + want: 1, + }, + { + idx: 1, + schema: -1, + want: 4, + }, + { + idx: 512, + schema: -1, + want: math.MaxFloat64, + }, + { + idx: 513, + schema: -1, + want: math.Inf(+1), + }, + { + idx: -1, + schema: 0, + want: 0.5, + }, + { + idx: 0, + schema: 0, + want: 1, + }, + { + idx: 1, + schema: 0, + want: 2, + }, + { + idx: 1024, + schema: 0, + want: math.MaxFloat64, + }, + { + idx: 1025, + schema: 0, + want: math.Inf(+1), + }, + { + idx: -1, + schema: 2, + want: 0.8408964152537144, + }, + { + idx: 0, + schema: 2, + want: 1, + }, + { + idx: 1, + schema: 2, + want: 1.189207115002721, + }, + { + idx: 4096, + schema: 2, + want: math.MaxFloat64, + }, + { + idx: 4097, + schema: 2, + want: math.Inf(+1), + }, + } + + for _, s := range scenarios { + got := getBound(s.idx, s.schema) + if s.want != got { + require.Equal(t, s.want, got, "idx %d, schema %d", s.idx, s.schema) + } + } +} diff --git a/model/histogram/histogram.go b/model/histogram/histogram.go new file mode 100644 index 0000000000..6d425307c5 --- /dev/null +++ b/model/histogram/histogram.go @@ -0,0 +1,450 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package histogram + +import ( + "fmt" + "math" + "strings" +) + +// CounterResetHint contains the known information about a counter reset, +// or alternatively that we are dealing with a gauge histogram, where counter resets do not apply. +type CounterResetHint byte + +const ( + UnknownCounterReset CounterResetHint = iota // UnknownCounterReset means we cannot say if this histogram signals a counter reset or not. + CounterReset // CounterReset means there was definitely a counter reset starting from this histogram. + NotCounterReset // NotCounterReset means there was definitely no counter reset with this histogram. + GaugeType // GaugeType means this is a gauge histogram, where counter resets do not happen. +) + +// Histogram encodes a sparse, high-resolution histogram. 
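+// For example, PositiveSpans [{0,2},{1,2}] with delta-encoded
+// PositiveBuckets [1,1,-1,0] describes absolute counts 1, 2, 1, 1 in the
+// buckets with indices 0, 1, 3, and 4 (the second span starts after a gap
+// of one unused index).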
See the design +// document for full details: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit# +// +// The most tricky bit is how bucket indices represent real bucket boundaries. +// An example for schema 0 (by which each bucket is twice as wide as the +// previous bucket): +// +// Bucket boundaries → [-2,-1) [-1,-0.5) [-0.5,-0.25) ... [-0.001,0.001] ... (0.25,0.5] (0.5,1] (1,2] .... +// ↑ ↑ ↑ ↑ ↑ ↑ ↑ +// Zero bucket (width e.g. 0.001) → | | | ZB | | | +// Positive bucket indices → | | | ... -1 0 1 2 3 +// Negative bucket indices → 3 2 1 0 -1 ... +// +// Which bucket indices are actually used is determined by the spans. +type Histogram struct { + // Counter reset information. + CounterResetHint CounterResetHint + // Currently valid schema numbers are -4 <= n <= 8. They are all for + // base-2 bucket schemas, where 1 is a bucket boundary in each case, and + // then each power of two is divided into 2^n logarithmic buckets. Or + // in other words, each bucket boundary is the previous boundary times + // 2^(2^-n). + Schema int32 + // Width of the zero bucket. + ZeroThreshold float64 + // Observations falling into the zero bucket. + ZeroCount uint64 + // Total number of observations. + Count uint64 + // Sum of observations. This is also used as the stale marker. + Sum float64 + // Spans for positive and negative buckets (see Span below). + PositiveSpans, NegativeSpans []Span + // Observation counts in buckets. The first element is an absolute + // count. All following ones are deltas relative to the previous + // element. + PositiveBuckets, NegativeBuckets []int64 +} + +// A Span defines a continuous sequence of buckets. +type Span struct { + // Gap to previous span (always positive), or starting index for the 1st + // span (which can be negative). + Offset int32 + // Length of the span. + Length uint32 +} + +// Copy returns a deep copy of the Histogram. +func (h *Histogram) Copy() *Histogram { + c := *h + + if len(h.PositiveSpans) != 0 { + c.PositiveSpans = make([]Span, len(h.PositiveSpans)) + copy(c.PositiveSpans, h.PositiveSpans) + } + if len(h.NegativeSpans) != 0 { + c.NegativeSpans = make([]Span, len(h.NegativeSpans)) + copy(c.NegativeSpans, h.NegativeSpans) + } + if len(h.PositiveBuckets) != 0 { + c.PositiveBuckets = make([]int64, len(h.PositiveBuckets)) + copy(c.PositiveBuckets, h.PositiveBuckets) + } + if len(h.NegativeBuckets) != 0 { + c.NegativeBuckets = make([]int64, len(h.NegativeBuckets)) + copy(c.NegativeBuckets, h.NegativeBuckets) + } + + return &c +} + +// String returns a string representation of the Histogram. +func (h *Histogram) String() string { + var sb strings.Builder + fmt.Fprintf(&sb, "{count:%d, sum:%g", h.Count, h.Sum) + + var nBuckets []Bucket[uint64] + for it := h.NegativeBucketIterator(); it.Next(); { + bucket := it.At() + if bucket.Count != 0 { + nBuckets = append(nBuckets, it.At()) + } + } + for i := len(nBuckets) - 1; i >= 0; i-- { + fmt.Fprintf(&sb, ", %s", nBuckets[i].String()) + } + + if h.ZeroCount != 0 { + fmt.Fprintf(&sb, ", %s", h.ZeroBucket().String()) + } + + for it := h.PositiveBucketIterator(); it.Next(); { + bucket := it.At() + if bucket.Count != 0 { + fmt.Fprintf(&sb, ", %s", bucket.String()) + } + } + + sb.WriteRune('}') + return sb.String() +} + +// ZeroBucket returns the zero bucket. 
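+// Both of its bounds are inclusive: the zero bucket covers the closed
+// interval [-ZeroThreshold, +ZeroThreshold].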
+func (h *Histogram) ZeroBucket() Bucket[uint64] {
+    return Bucket[uint64]{
+        Lower:          -h.ZeroThreshold,
+        Upper:          h.ZeroThreshold,
+        LowerInclusive: true,
+        UpperInclusive: true,
+        Count:          h.ZeroCount,
+    }
+}
+
+// PositiveBucketIterator returns a BucketIterator to iterate over all positive
+// buckets in ascending order (starting next to the zero bucket and going up).
+func (h *Histogram) PositiveBucketIterator() BucketIterator[uint64] {
+    return newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true)
+}
+
+// NegativeBucketIterator returns a BucketIterator to iterate over all negative
+// buckets in descending order (starting next to the zero bucket and going down).
+func (h *Histogram) NegativeBucketIterator() BucketIterator[uint64] {
+    return newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false)
+}
+
+// CumulativeBucketIterator returns a BucketIterator to iterate over a
+// cumulative view of the buckets. This method currently only supports
+// Histograms without negative buckets and panics if the Histogram has negative
+// buckets. It is currently only used for testing.
+func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] {
+    if len(h.NegativeBuckets) > 0 {
+        panic("CumulativeBucketIterator called on Histogram with negative buckets")
+    }
+    return &cumulativeBucketIterator{h: h, posSpansIdx: -1}
+}
+
+// Equals returns true if the given histogram matches exactly. An exact match
+// means there are no new buckets (even empty) and no missing buckets, and all
+// bucket values match. The two histograms may contain different zero-length
+// spans in between, but they must represent the same bucket layout to match.
+func (h *Histogram) Equals(h2 *Histogram) bool {
+    if h2 == nil {
+        return false
+    }
+
+    if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold ||
+        h.ZeroCount != h2.ZeroCount || h.Count != h2.Count || h.Sum != h2.Sum {
+        return false
+    }
+
+    if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
+        return false
+    }
+    if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
+        return false
+    }
+
+    if !bucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
+        return false
+    }
+    if !bucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
+        return false
+    }
+
+    return true
+}
+
+// spansMatch returns true if both span slices represent the same bucket
+// layout after combining zero-length spans with the next non-zero-length
+// span.
+func spansMatch(s1, s2 []Span) bool {
+    if len(s1) == 0 && len(s2) == 0 {
+        return true
+    }
+
+    s1idx, s2idx := 0, 0
+    for {
+        if s1idx >= len(s1) {
+            return allEmptySpans(s2[s2idx:])
+        }
+        if s2idx >= len(s2) {
+            return allEmptySpans(s1[s1idx:])
+        }
+
+        currS1, currS2 := s1[s1idx], s2[s2idx]
+        s1idx++
+        s2idx++
+        if currS1.Length == 0 {
+            // This span is zero length, so we add consecutive such spans
+            // until we find a non-zero span.
+            for ; s1idx < len(s1) && s1[s1idx].Length == 0; s1idx++ {
+                currS1.Offset += s1[s1idx].Offset
+            }
+            if s1idx < len(s1) {
+                currS1.Offset += s1[s1idx].Offset
+                currS1.Length = s1[s1idx].Length
+                s1idx++
+            }
+        }
+        if currS2.Length == 0 {
+            // This span is zero length, so we add consecutive such spans
+            // until we find a non-zero span.
+            for ; s2idx < len(s2) && s2[s2idx].Length == 0; s2idx++ {
+                currS2.Offset += s2[s2idx].Offset
+            }
+            if s2idx < len(s2) {
+                currS2.Offset += s2[s2idx].Offset
+                currS2.Length = s2[s2idx].Length
+                s2idx++
+            }
+        }
+
+        if currS1.Length == 0 && currS2.Length == 0 {
+            // The last spans of both sets are zero length. Previous spans match.
+            return true
+        }
+
+        if currS1.Offset != currS2.Offset || currS1.Length != currS2.Length {
+            return false
+        }
+    }
+}
+
+func allEmptySpans(s []Span) bool {
+    for _, ss := range s {
+        if ss.Length > 0 {
+            return false
+        }
+    }
+    return true
+}
+
+// Compact works like FloatHistogram.Compact. See there for detailed
+// explanations.
+func (h *Histogram) Compact(maxEmptyBuckets int) *Histogram {
+    h.PositiveBuckets, h.PositiveSpans = compactBuckets(
+        h.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, true,
+    )
+    h.NegativeBuckets, h.NegativeSpans = compactBuckets(
+        h.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, true,
+    )
+    return h
+}
+
+// ToFloat returns a FloatHistogram representation of the Histogram. It is a
+// deep copy (e.g. spans are not shared).
+func (h *Histogram) ToFloat() *FloatHistogram {
+    var (
+        positiveSpans, negativeSpans     []Span
+        positiveBuckets, negativeBuckets []float64
+    )
+    if len(h.PositiveSpans) != 0 {
+        positiveSpans = make([]Span, len(h.PositiveSpans))
+        copy(positiveSpans, h.PositiveSpans)
+    }
+    if len(h.NegativeSpans) != 0 {
+        negativeSpans = make([]Span, len(h.NegativeSpans))
+        copy(negativeSpans, h.NegativeSpans)
+    }
+    if len(h.PositiveBuckets) != 0 {
+        positiveBuckets = make([]float64, len(h.PositiveBuckets))
+        var current float64
+        for i, b := range h.PositiveBuckets {
+            current += float64(b)
+            positiveBuckets[i] = current
+        }
+    }
+    if len(h.NegativeBuckets) != 0 {
+        negativeBuckets = make([]float64, len(h.NegativeBuckets))
+        var current float64
+        for i, b := range h.NegativeBuckets {
+            current += float64(b)
+            negativeBuckets[i] = current
+        }
+    }
+
+    return &FloatHistogram{
+        CounterResetHint: h.CounterResetHint,
+        Schema:           h.Schema,
+        ZeroThreshold:    h.ZeroThreshold,
+        ZeroCount:        float64(h.ZeroCount),
+        Count:            float64(h.Count),
+        Sum:              h.Sum,
+        PositiveSpans:    positiveSpans,
+        NegativeSpans:    negativeSpans,
+        PositiveBuckets:  positiveBuckets,
+        NegativeBuckets:  negativeBuckets,
+    }
+}
+
+type regularBucketIterator struct {
+    baseBucketIterator[uint64, int64]
+}
+
+func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool) *regularBucketIterator {
+    i := baseBucketIterator[uint64, int64]{
+        schema:   schema,
+        spans:    spans,
+        buckets:  buckets,
+        positive: positive,
+    }
+    return &regularBucketIterator{i}
+}
+
+func (r *regularBucketIterator) Next() bool {
+    if r.spansIdx >= len(r.spans) {
+        return false
+    }
+    span := r.spans[r.spansIdx]
+    // Seed currIdx for the first bucket.
+    if r.bucketsIdx == 0 {
+        r.currIdx = span.Offset
+    } else {
+        r.currIdx++
+    }
+    for r.idxInSpan >= span.Length {
+        // We have exhausted the current span and have to find a new
+        // one. We'll even handle pathological spans of length 0.
+        r.idxInSpan = 0
+        r.spansIdx++
+        if r.spansIdx >= len(r.spans) {
+            return false
+        }
+        span = r.spans[r.spansIdx]
+        r.currIdx += span.Offset
+    }
+
+    r.currCount += r.buckets[r.bucketsIdx]
+    r.idxInSpan++
+    r.bucketsIdx++
+    return true
+}
+
+type cumulativeBucketIterator struct {
+    h *Histogram
+
+    posSpansIdx   int    // Index in h.PositiveSpans we are in. -1 means the zero bucket.
+    posBucketsIdx int    // Index in h.PositiveBuckets.
+    idxInSpan     uint32 // Index in the current span. 0 <= idxInSpan < span.Length.
+
+    initialized         bool
+    currIdx             int32   // The actual bucket index after decoding from spans.
+    currUpper           float64 // The upper boundary of the current bucket.
+    currCount           int64   // Current non-cumulative count for the current bucket. Does not apply to empty buckets.
+    currCumulativeCount uint64  // Current "cumulative" count for the current bucket.
+
+    // Between two spans there can be empty buckets which still need to be
+    // counted for the cumulative buckets. When we hit the end of a span,
+    // we use this to iterate through the empty buckets.
+    emptyBucketCount int32
+}
+
+func (c *cumulativeBucketIterator) Next() bool {
+    if c.posSpansIdx == -1 {
+        // Zero bucket.
+        c.posSpansIdx++
+        if c.h.ZeroCount == 0 {
+            return c.Next()
+        }
+
+        c.currUpper = c.h.ZeroThreshold
+        c.currCount = int64(c.h.ZeroCount)
+        c.currCumulativeCount = uint64(c.currCount)
+        return true
+    }
+
+    if c.posSpansIdx >= len(c.h.PositiveSpans) {
+        return false
+    }
+
+    if c.emptyBucketCount > 0 {
+        // We are traversing through empty buckets at the moment.
+        c.currUpper = getBound(c.currIdx, c.h.Schema)
+        c.currIdx++
+        c.emptyBucketCount--
+        return true
+    }
+
+    span := c.h.PositiveSpans[c.posSpansIdx]
+    if c.posSpansIdx == 0 && !c.initialized {
+        // Initializing.
+        c.currIdx = span.Offset
+        // The first bucket is an absolute count, not a delta relative
+        // to the zero bucket.
+        c.currCount = 0
+        c.initialized = true
+    }
+
+    c.currCount += c.h.PositiveBuckets[c.posBucketsIdx]
+    c.currCumulativeCount += uint64(c.currCount)
+    c.currUpper = getBound(c.currIdx, c.h.Schema)
+
+    c.posBucketsIdx++
+    c.idxInSpan++
+    c.currIdx++
+    if c.idxInSpan >= span.Length {
+        // Move to the next span. This one is done.
+        c.posSpansIdx++
+        c.idxInSpan = 0
+        if c.posSpansIdx < len(c.h.PositiveSpans) {
+            c.emptyBucketCount = c.h.PositiveSpans[c.posSpansIdx].Offset
+        }
+    }
+
+    return true
+}
+
+func (c *cumulativeBucketIterator) At() Bucket[uint64] {
+    return Bucket[uint64]{
+        Upper:          c.currUpper,
+        Lower:          math.Inf(-1),
+        UpperInclusive: true,
+        LowerInclusive: true,
+        Count:          c.currCumulativeCount,
+        Index:          c.currIdx - 1,
+    }
+}
diff --git a/model/histogram/histogram_test.go b/model/histogram/histogram_test.go
new file mode 100644
index 0000000000..3975353d2a
--- /dev/null
+++ b/model/histogram/histogram_test.go
@@ -0,0 +1,796 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package histogram + +import ( + "fmt" + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestHistogramString(t *testing.T) { + cases := []struct { + histogram Histogram + expectedString string + }{ + { + histogram: Histogram{ + Schema: 0, + }, + expectedString: "{count:0, sum:0}", + }, + { + histogram: Histogram{ + Schema: 0, + Count: 9, + Sum: -3.1415, + ZeroCount: 12, + ZeroThreshold: 0.001, + NegativeSpans: []Span{ + {Offset: 0, Length: 5}, + {Offset: 1, Length: 1}, + }, + NegativeBuckets: []int64{1, 2, -2, 1, -1, 0}, + }, + expectedString: "{count:9, sum:-3.1415, [-64,-32):1, [-16,-8):1, [-8,-4):2, [-4,-2):1, [-2,-1):3, [-1,-0.5):1, [-0.001,0.001]:12}", + }, + { + histogram: Histogram{ + Schema: 0, + Count: 19, + Sum: 2.7, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + NegativeSpans: []Span{ + {Offset: 0, Length: 5}, + {Offset: 1, Length: 0}, + {Offset: 0, Length: 1}, + }, + NegativeBuckets: []int64{1, 2, -2, 1, -1, 0}, + }, + expectedString: "{count:19, sum:2.7, [-64,-32):1, [-16,-8):1, [-8,-4):2, [-4,-2):1, [-2,-1):3, [-1,-0.5):1, (0.5,1]:1, (1,2]:3, (2,4]:1, (4,8]:2, (8,16]:1, (16,32]:1, (32,64]:1}", + }, + } + + for i, c := range cases { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + actualString := c.histogram.String() + require.Equal(t, c.expectedString, actualString) + }) + } +} + +func TestCumulativeBucketIterator(t *testing.T) { + cases := []struct { + histogram Histogram + expectedBuckets []Bucket[uint64] + }{ + { + histogram: Histogram{ + Schema: 0, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + }, + expectedBuckets: []Bucket[uint64]{ + {Lower: math.Inf(-1), Upper: 1, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0}, + {Lower: math.Inf(-1), Upper: 2, Count: 3, LowerInclusive: true, UpperInclusive: true, Index: 1}, + + {Lower: math.Inf(-1), Upper: 4, Count: 3, LowerInclusive: true, UpperInclusive: true, Index: 2}, + + {Lower: math.Inf(-1), Upper: 8, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: 3}, + {Lower: math.Inf(-1), Upper: 16, Count: 5, LowerInclusive: true, UpperInclusive: true, Index: 4}, + }, + }, + { + histogram: Histogram{ + Schema: 0, + PositiveSpans: []Span{ + {Offset: 0, Length: 5}, + {Offset: 1, Length: 1}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0}, + }, + expectedBuckets: []Bucket[uint64]{ + {Lower: math.Inf(-1), Upper: 1, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0}, + {Lower: math.Inf(-1), Upper: 2, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: 1}, + {Lower: math.Inf(-1), Upper: 4, Count: 5, LowerInclusive: true, UpperInclusive: true, Index: 2}, + {Lower: math.Inf(-1), Upper: 8, Count: 7, LowerInclusive: true, UpperInclusive: true, Index: 3}, + + {Lower: math.Inf(-1), Upper: 16, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 4}, + + {Lower: math.Inf(-1), Upper: 32, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 5}, + {Lower: math.Inf(-1), Upper: 64, Count: 9, LowerInclusive: true, UpperInclusive: true, Index: 6}, + }, + }, + { + histogram: Histogram{ + Schema: 0, + PositiveSpans: []Span{ + {Offset: 0, Length: 7}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + }, + expectedBuckets: []Bucket[uint64]{ + {Lower: math.Inf(-1), Upper: 1, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0}, + {Lower: math.Inf(-1), Upper: 
2, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: 1}, + {Lower: math.Inf(-1), Upper: 4, Count: 5, LowerInclusive: true, UpperInclusive: true, Index: 2}, + {Lower: math.Inf(-1), Upper: 8, Count: 7, LowerInclusive: true, UpperInclusive: true, Index: 3}, + {Lower: math.Inf(-1), Upper: 16, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 4}, + {Lower: math.Inf(-1), Upper: 32, Count: 9, LowerInclusive: true, UpperInclusive: true, Index: 5}, + {Lower: math.Inf(-1), Upper: 64, Count: 10, LowerInclusive: true, UpperInclusive: true, Index: 6}, + }, + }, + { + histogram: Histogram{ + Schema: 3, + PositiveSpans: []Span{ + {Offset: -5, Length: 2}, // -5 -4 + {Offset: 2, Length: 3}, // -1 0 1 + {Offset: 2, Length: 2}, // 4 5 + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 3}, + }, + expectedBuckets: []Bucket[uint64]{ + {Lower: math.Inf(-1), Upper: 0.6484197773255048, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: -5}, + {Lower: math.Inf(-1), Upper: 0.7071067811865475, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: -4}, + + {Lower: math.Inf(-1), Upper: 0.7711054127039704, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: -3}, + {Lower: math.Inf(-1), Upper: 0.8408964152537144, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: -2}, + + {Lower: math.Inf(-1), Upper: 0.9170040432046711, Count: 5, LowerInclusive: true, UpperInclusive: true, Index: -1}, + {Lower: math.Inf(-1), Upper: 1, Count: 7, LowerInclusive: true, UpperInclusive: true, Index: 0}, + {Lower: math.Inf(-1), Upper: 1.0905077326652577, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 1}, + + {Lower: math.Inf(-1), Upper: 1.189207115002721, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 2}, + {Lower: math.Inf(-1), Upper: 1.2968395546510096, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 3}, + + {Lower: math.Inf(-1), Upper: 1.414213562373095, Count: 9, LowerInclusive: true, UpperInclusive: true, Index: 4}, + {Lower: math.Inf(-1), Upper: 1.5422108254079407, Count: 13, LowerInclusive: true, UpperInclusive: true, Index: 5}, + }, + }, + { + histogram: Histogram{ + Schema: -2, + PositiveSpans: []Span{ + {Offset: -2, Length: 4}, // -2 -1 0 1 + {Offset: 2, Length: 2}, // 4 5 + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0}, + }, + expectedBuckets: []Bucket[uint64]{ + {Lower: math.Inf(-1), Upper: 0.00390625, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: -2}, + {Lower: math.Inf(-1), Upper: 0.0625, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: -1}, + {Lower: math.Inf(-1), Upper: 1, Count: 5, LowerInclusive: true, UpperInclusive: true, Index: 0}, + {Lower: math.Inf(-1), Upper: 16, Count: 7, LowerInclusive: true, UpperInclusive: true, Index: 1}, + + {Lower: math.Inf(-1), Upper: 256, Count: 7, LowerInclusive: true, UpperInclusive: true, Index: 2}, + {Lower: math.Inf(-1), Upper: 4096, Count: 7, LowerInclusive: true, UpperInclusive: true, Index: 3}, + + {Lower: math.Inf(-1), Upper: 65536, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 4}, + {Lower: math.Inf(-1), Upper: 1048576, Count: 9, LowerInclusive: true, UpperInclusive: true, Index: 5}, + }, + }, + { + histogram: Histogram{ + Schema: -1, + PositiveSpans: []Span{ + {Offset: -2, Length: 5}, // -2 -1 0 1 2 + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1}, + }, + expectedBuckets: []Bucket[uint64]{ + {Lower: math.Inf(-1), Upper: 0.0625, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: -2}, + {Lower: math.Inf(-1), Upper: 0.25, Count: 4, 
LowerInclusive: true, UpperInclusive: true, Index: -1}, + {Lower: math.Inf(-1), Upper: 1, Count: 5, LowerInclusive: true, UpperInclusive: true, Index: 0}, + {Lower: math.Inf(-1), Upper: 4, Count: 7, LowerInclusive: true, UpperInclusive: true, Index: 1}, + {Lower: math.Inf(-1), Upper: 16, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 2}, + }, + }, + } + + for i, c := range cases { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + it := c.histogram.CumulativeBucketIterator() + actualBuckets := make([]Bucket[uint64], 0, len(c.expectedBuckets)) + for it.Next() { + actualBuckets = append(actualBuckets, it.At()) + } + require.Equal(t, c.expectedBuckets, actualBuckets) + }) + } +} + +func TestRegularBucketIterator(t *testing.T) { + cases := []struct { + histogram Histogram + expectedPositiveBuckets []Bucket[uint64] + expectedNegativeBuckets []Bucket[uint64] + }{ + { + histogram: Histogram{ + Schema: 0, + }, + expectedPositiveBuckets: []Bucket[uint64]{}, + expectedNegativeBuckets: []Bucket[uint64]{}, + }, + { + histogram: Histogram{ + Schema: 0, + PositiveSpans: []Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + }, + expectedPositiveBuckets: []Bucket[uint64]{ + {Lower: 0.5, Upper: 1, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 0}, + {Lower: 1, Upper: 2, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1}, + + {Lower: 4, Upper: 8, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3}, + {Lower: 8, Upper: 16, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4}, + }, + expectedNegativeBuckets: []Bucket[uint64]{}, + }, + { + histogram: Histogram{ + Schema: 0, + NegativeSpans: []Span{ + {Offset: 0, Length: 5}, + {Offset: 1, Length: 1}, + }, + NegativeBuckets: []int64{1, 2, -2, 1, -1, 0}, + }, + expectedPositiveBuckets: []Bucket[uint64]{}, + expectedNegativeBuckets: []Bucket[uint64]{ + {Lower: -1, Upper: -0.5, Count: 1, LowerInclusive: true, UpperInclusive: false, Index: 0}, + {Lower: -2, Upper: -1, Count: 3, LowerInclusive: true, UpperInclusive: false, Index: 1}, + {Lower: -4, Upper: -2, Count: 1, LowerInclusive: true, UpperInclusive: false, Index: 2}, + {Lower: -8, Upper: -4, Count: 2, LowerInclusive: true, UpperInclusive: false, Index: 3}, + {Lower: -16, Upper: -8, Count: 1, LowerInclusive: true, UpperInclusive: false, Index: 4}, + + {Lower: -64, Upper: -32, Count: 1, LowerInclusive: true, UpperInclusive: false, Index: 6}, + }, + }, + { + histogram: Histogram{ + Schema: 0, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + NegativeSpans: []Span{ + {Offset: 0, Length: 5}, + {Offset: 1, Length: 0}, + {Offset: 0, Length: 1}, + }, + NegativeBuckets: []int64{1, 2, -2, 1, -1, 0}, + }, + expectedPositiveBuckets: []Bucket[uint64]{ + {Lower: 0.5, Upper: 1, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 0}, + {Lower: 1, Upper: 2, Count: 3, LowerInclusive: false, UpperInclusive: true, Index: 1}, + {Lower: 2, Upper: 4, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 2}, + {Lower: 4, Upper: 8, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 3}, + {Lower: 8, Upper: 16, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4}, + {Lower: 16, Upper: 32, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 5}, + {Lower: 32, Upper: 64, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 6}, + }, + 
expectedNegativeBuckets: []Bucket[uint64]{ + {Lower: -1, Upper: -0.5, Count: 1, LowerInclusive: true, UpperInclusive: false, Index: 0}, + {Lower: -2, Upper: -1, Count: 3, LowerInclusive: true, UpperInclusive: false, Index: 1}, + {Lower: -4, Upper: -2, Count: 1, LowerInclusive: true, UpperInclusive: false, Index: 2}, + {Lower: -8, Upper: -4, Count: 2, LowerInclusive: true, UpperInclusive: false, Index: 3}, + {Lower: -16, Upper: -8, Count: 1, LowerInclusive: true, UpperInclusive: false, Index: 4}, + + {Lower: -64, Upper: -32, Count: 1, LowerInclusive: true, UpperInclusive: false, Index: 6}, + }, + }, + { + histogram: Histogram{ + Schema: 3, + PositiveSpans: []Span{ + {Offset: -5, Length: 2}, // -5 -4 + {Offset: 2, Length: 3}, // -1 0 1 + {Offset: 2, Length: 2}, // 4 5 + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 3}, + }, + expectedPositiveBuckets: []Bucket[uint64]{ + {Lower: 0.5946035575013605, Upper: 0.6484197773255048, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: -5}, + {Lower: 0.6484197773255048, Upper: 0.7071067811865475, Count: 3, LowerInclusive: false, UpperInclusive: true, Index: -4}, + + {Lower: 0.8408964152537144, Upper: 0.9170040432046711, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: -1}, + {Lower: 0.9170040432046711, Upper: 1, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 0}, + {Lower: 1, Upper: 1.0905077326652577, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 1}, + + {Lower: 1.2968395546510096, Upper: 1.414213562373095, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4}, + {Lower: 1.414213562373095, Upper: 1.5422108254079407, Count: 4, LowerInclusive: false, UpperInclusive: true, Index: 5}, + }, + expectedNegativeBuckets: []Bucket[uint64]{}, + }, + { + histogram: Histogram{ + Schema: -2, + PositiveSpans: []Span{ + {Offset: -2, Length: 4}, // -2 -1 0 1 + {Offset: 2, Length: 2}, // 4 5 + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0}, + }, + expectedPositiveBuckets: []Bucket[uint64]{ + {Lower: 0.000244140625, Upper: 0.00390625, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: -2}, + {Lower: 0.00390625, Upper: 0.0625, Count: 3, LowerInclusive: false, UpperInclusive: true, Index: -1}, + {Lower: 0.0625, Upper: 1, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 0}, + {Lower: 1, Upper: 16, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1}, + + {Lower: 4096, Upper: 65536, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4}, + {Lower: 65536, Upper: 1048576, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 5}, + }, + expectedNegativeBuckets: []Bucket[uint64]{}, + }, + { + histogram: Histogram{ + Schema: -1, + PositiveSpans: []Span{ + {Offset: -2, Length: 5}, // -2 -1 0 1 2 + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1}, + }, + expectedPositiveBuckets: []Bucket[uint64]{ + {Lower: 0.015625, Upper: 0.0625, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: -2}, + {Lower: 0.0625, Upper: 0.25, Count: 3, LowerInclusive: false, UpperInclusive: true, Index: -1}, + {Lower: 0.25, Upper: 1, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 0}, + {Lower: 1, Upper: 4, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1}, + {Lower: 4, Upper: 16, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 2}, + }, + expectedNegativeBuckets: []Bucket[uint64]{}, + }, + } + + for i, c := range cases { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + it := c.histogram.PositiveBucketIterator() + 
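+			// Drain the iterator into a slice so the whole positive bucket sequence can be checked with one assertion.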
actualPositiveBuckets := make([]Bucket[uint64], 0, len(c.expectedPositiveBuckets)) + for it.Next() { + actualPositiveBuckets = append(actualPositiveBuckets, it.At()) + } + require.Equal(t, c.expectedPositiveBuckets, actualPositiveBuckets) + it = c.histogram.NegativeBucketIterator() + actualNegativeBuckets := make([]Bucket[uint64], 0, len(c.expectedNegativeBuckets)) + for it.Next() { + actualNegativeBuckets = append(actualNegativeBuckets, it.At()) + } + require.Equal(t, c.expectedNegativeBuckets, actualNegativeBuckets) + }) + } +} + +func TestHistogramToFloat(t *testing.T) { + h := Histogram{ + Schema: 3, + Count: 61, + Sum: 2.7, + ZeroThreshold: 0.1, + ZeroCount: 42, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + NegativeSpans: []Span{ + {Offset: 0, Length: 5}, + {Offset: 1, Length: 0}, + {Offset: 0, Length: 1}, + }, + NegativeBuckets: []int64{1, 2, -2, 1, -1, 0}, + } + fh := h.ToFloat() + + require.Equal(t, h.String(), fh.String()) +} + +// TestHistogramMatches tests both Histogram and FloatHistogram. +func TestHistogramMatches(t *testing.T) { + h1 := Histogram{ + Schema: 3, + Count: 61, + Sum: 2.7, + ZeroThreshold: 0.1, + ZeroCount: 42, + PositiveSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 10, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + NegativeSpans: []Span{ + {Offset: 0, Length: 4}, + {Offset: 10, Length: 3}, + }, + NegativeBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + } + + equals := func(h1, h2 Histogram) { + require.True(t, h1.Equals(&h2)) + require.True(t, h2.Equals(&h1)) + h1f, h2f := h1.ToFloat(), h2.ToFloat() + require.True(t, h1f.Equals(h2f)) + require.True(t, h2f.Equals(h1f)) + } + notEquals := func(h1, h2 Histogram) { + require.False(t, h1.Equals(&h2)) + require.False(t, h2.Equals(&h1)) + h1f, h2f := h1.ToFloat(), h2.ToFloat() + require.False(t, h1f.Equals(h2f)) + require.False(t, h2f.Equals(h1f)) + } + + h2 := h1.Copy() + equals(h1, *h2) + + // Changed spans but same layout. + h2.PositiveSpans = append(h2.PositiveSpans, Span{Offset: 5}) + h2.NegativeSpans = append(h2.NegativeSpans, Span{Offset: 2}) + equals(h1, *h2) + // Adding empty spans in between. + h2.PositiveSpans[1].Offset = 6 + h2.PositiveSpans = []Span{ + h2.PositiveSpans[0], + {Offset: 1}, + {Offset: 3}, + h2.PositiveSpans[1], + h2.PositiveSpans[2], + } + h2.NegativeSpans[1].Offset = 5 + h2.NegativeSpans = []Span{ + h2.NegativeSpans[0], + {Offset: 2}, + {Offset: 3}, + h2.NegativeSpans[1], + h2.NegativeSpans[2], + } + equals(h1, *h2) + + // All mismatches. + notEquals(h1, Histogram{}) + + h2.Schema = 1 + notEquals(h1, *h2) + + h2 = h1.Copy() + h2.Count++ + notEquals(h1, *h2) + + h2 = h1.Copy() + h2.Sum++ + notEquals(h1, *h2) + + h2 = h1.Copy() + h2.ZeroThreshold++ + notEquals(h1, *h2) + + h2 = h1.Copy() + h2.ZeroCount++ + notEquals(h1, *h2) + + // Changing value of buckets. + h2 = h1.Copy() + h2.PositiveBuckets[len(h2.PositiveBuckets)-1]++ + notEquals(h1, *h2) + h2 = h1.Copy() + h2.NegativeBuckets[len(h2.NegativeBuckets)-1]++ + notEquals(h1, *h2) + + // Changing bucket layout. + h2 = h1.Copy() + h2.PositiveSpans[1].Offset++ + notEquals(h1, *h2) + h2 = h1.Copy() + h2.NegativeSpans[1].Offset++ + notEquals(h1, *h2) + + // Adding an empty bucket. + h2 = h1.Copy() + h2.PositiveSpans[0].Offset-- + h2.PositiveSpans[0].Length++ + h2.PositiveBuckets = append([]int64{0}, h2.PositiveBuckets...) 
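+	// The prepended empty bucket changes the span layout, so equality must fail even though every populated bucket count is unchanged.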
+ notEquals(h1, *h2) + h2 = h1.Copy() + h2.NegativeSpans[0].Offset-- + h2.NegativeSpans[0].Length++ + h2.NegativeBuckets = append([]int64{0}, h2.NegativeBuckets...) + notEquals(h1, *h2) + + // Adding new bucket. + h2 = h1.Copy() + h2.PositiveSpans = append(h2.PositiveSpans, Span{ + Offset: 1, + Length: 1, + }) + h2.PositiveBuckets = append(h2.PositiveBuckets, 1) + notEquals(h1, *h2) + h2 = h1.Copy() + h2.NegativeSpans = append(h2.NegativeSpans, Span{ + Offset: 1, + Length: 1, + }) + h2.NegativeBuckets = append(h2.NegativeBuckets, 1) + notEquals(h1, *h2) +} + +func TestHistogramCompact(t *testing.T) { + cases := []struct { + name string + in *Histogram + maxEmptyBuckets int + expected *Histogram + }{ + { + "empty histogram", + &Histogram{}, + 0, + &Histogram{}, + }, + { + "nothing should happen", + &Histogram{ + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []int64{5, 3, 1.234e5, 1000}, + }, + 0, + &Histogram{ + PositiveSpans: []Span{{-2, 1}, {2, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42}, + NegativeSpans: []Span{{3, 2}, {3, 2}}, + NegativeBuckets: []int64{5, 3, 1.234e5, 1000}, + }, + }, + { + "eliminate zero offsets", + &Histogram{ + PositiveSpans: []Span{{-2, 1}, {0, 3}, {0, 1}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + NegativeSpans: []Span{{0, 2}, {0, 2}, {2, 1}, {0, 1}}, + NegativeBuckets: []int64{5, 3, 1.234e5, 1000, 3, 4}, + }, + 0, + &Histogram{ + PositiveSpans: []Span{{-2, 5}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + NegativeSpans: []Span{{0, 4}, {2, 2}}, + NegativeBuckets: []int64{5, 3, 1.234e5, 1000, 3, 4}, + }, + }, + { + "eliminate zero length", + &Histogram{ + PositiveSpans: []Span{{-2, 2}, {2, 0}, {3, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + NegativeSpans: []Span{{0, 2}, {0, 0}, {2, 0}, {1, 4}}, + NegativeBuckets: []int64{5, 3, 1.234e5, 1000, 3, 4}, + }, + 0, + &Histogram{ + PositiveSpans: []Span{{-2, 2}, {5, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + NegativeSpans: []Span{{0, 2}, {3, 4}}, + NegativeBuckets: []int64{5, 3, 1.234e5, 1000, 3, 4}, + }, + }, + { + "eliminate multiple zero length spans", + &Histogram{ + PositiveSpans: []Span{{-2, 2}, {2, 0}, {2, 0}, {2, 0}, {3, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + }, + 0, + &Histogram{ + PositiveSpans: []Span{{-2, 2}, {9, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + }, + }, + { + "cut empty buckets at start or end", + &Histogram{ + PositiveSpans: []Span{{-4, 4}, {5, 3}}, + PositiveBuckets: []int64{0, 0, 1, 3, -3, 42, 3}, + NegativeSpans: []Span{{0, 2}, {3, 5}}, + NegativeBuckets: []int64{5, 3, -4, -2, 3, 4, -9}, + }, + 0, + &Histogram{ + PositiveSpans: []Span{{-2, 2}, {5, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + NegativeSpans: []Span{{0, 2}, {3, 4}}, + NegativeBuckets: []int64{5, 3, -4, -2, 3, 4}, + }, + }, + { + "cut empty buckets at start and end", + &Histogram{ + PositiveSpans: []Span{{-4, 4}, {5, 6}}, + PositiveBuckets: []int64{0, 0, 1, 3, -3, 42, 3, -46, 0, 0}, + NegativeSpans: []Span{{-2, 4}, {3, 5}}, + NegativeBuckets: []int64{0, 0, 5, 3, -4, -2, 3, 4, -9}, + }, + 0, + &Histogram{ + PositiveSpans: []Span{{-2, 2}, {5, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + NegativeSpans: []Span{{0, 2}, {3, 4}}, + NegativeBuckets: []int64{5, 3, -4, -2, 3, 4}, + }, + }, + { + "cut empty buckets at start or end of spans, even in the middle", + &Histogram{ + PositiveSpans: []Span{{-4, 6}, {3, 6}}, + PositiveBuckets: []int64{0, 0, 1, 3, -4, 0, 1, 42, 3, -46, 0, 0}, + 
NegativeSpans: []Span{{0, 2}, {2, 6}}, + NegativeBuckets: []int64{5, 3, -8, 4, -2, 3, 4, -9}, + }, + 0, + &Histogram{ + PositiveSpans: []Span{{-2, 2}, {5, 3}}, + PositiveBuckets: []int64{1, 3, -3, 42, 3}, + NegativeSpans: []Span{{0, 2}, {3, 4}}, + NegativeBuckets: []int64{5, 3, -4, -2, 3, 4}, + }, + }, + { + "cut empty buckets at start or end but merge spans due to maxEmptyBuckets", + &Histogram{ + PositiveSpans: []Span{{-4, 4}, {5, 3}}, + PositiveBuckets: []int64{0, 0, 1, 3, -3, 42, 3}, + NegativeSpans: []Span{{0, 2}, {3, 5}}, + NegativeBuckets: []int64{5, 3, -4, -2, 3, 4, -9}, + }, + 10, + &Histogram{ + PositiveSpans: []Span{{-2, 10}}, + PositiveBuckets: []int64{1, 3, -4, 0, 0, 0, 0, 1, 42, 3}, + NegativeSpans: []Span{{0, 9}}, + NegativeBuckets: []int64{5, 3, -8, 0, 0, 4, -2, 3, 4}, + }, + }, + { + "cut empty buckets from the middle of a span", + &Histogram{ + PositiveSpans: []Span{{-4, 6}, {3, 3}}, + PositiveBuckets: []int64{0, 0, 1, -1, 0, 3, -2, 42, 3}, + NegativeSpans: []Span{{0, 2}, {3, 5}}, + NegativeBuckets: []int64{5, 3, -4, -2, -2, 3, 4}, + }, + 0, + &Histogram{ + PositiveSpans: []Span{{-2, 1}, {2, 1}, {3, 3}}, + PositiveBuckets: []int64{1, 2, -2, 42, 3}, + NegativeSpans: []Span{{0, 2}, {3, 2}, {1, 2}}, + NegativeBuckets: []int64{5, 3, -4, -2, 1, 4}, + }, + }, + { + "cut out a span containing only empty buckets", + &Histogram{ + PositiveSpans: []Span{{-4, 3}, {2, 2}, {3, 4}}, + PositiveBuckets: []int64{0, 0, 1, -1, 0, 3, -2, 42, 3}, + }, + 0, + &Histogram{ + PositiveSpans: []Span{{-2, 1}, {7, 4}}, + PositiveBuckets: []int64{1, 2, -2, 42, 3}, + }, + }, + { + "cut empty buckets from the middle of a span, avoiding some due to maxEmptyBuckets", + &Histogram{ + PositiveSpans: []Span{{-4, 6}, {3, 3}}, + PositiveBuckets: []int64{0, 0, 1, -1, 0, 3, -2, 42, 3}, + NegativeSpans: []Span{{0, 2}, {3, 5}}, + NegativeBuckets: []int64{5, 3, -4, -2, -2, 3, 4}, + }, + 1, + &Histogram{ + PositiveSpans: []Span{{-2, 1}, {2, 1}, {3, 3}}, + PositiveBuckets: []int64{1, 2, -2, 42, 3}, + NegativeSpans: []Span{{0, 2}, {3, 5}}, + NegativeBuckets: []int64{5, 3, -4, -2, -2, 3, 4}, + }, + }, + { + "avoiding all cutting of empty buckets from the middle of a chunk due to maxEmptyBuckets", + &Histogram{ + PositiveSpans: []Span{{-4, 6}, {3, 3}}, + PositiveBuckets: []int64{0, 0, 1, -1, 0, 3, -2, 42, 3}, + NegativeSpans: []Span{{0, 2}, {3, 5}}, + NegativeBuckets: []int64{5, 3, -4, -2, -2, 3, 4}, + }, + 2, + &Histogram{ + PositiveSpans: []Span{{-2, 4}, {3, 3}}, + PositiveBuckets: []int64{1, -1, 0, 3, -2, 42, 3}, + NegativeSpans: []Span{{0, 2}, {3, 5}}, + NegativeBuckets: []int64{5, 3, -4, -2, -2, 3, 4}, + }, + }, + { + "everything merged into one span due to maxEmptyBuckets", + &Histogram{ + PositiveSpans: []Span{{-4, 6}, {3, 3}}, + PositiveBuckets: []int64{0, 0, 1, -1, 0, 3, -2, 42, 3}, + NegativeSpans: []Span{{0, 2}, {3, 5}}, + NegativeBuckets: []int64{5, 3, -4, -2, -2, 3, 4}, + }, + 3, + &Histogram{ + PositiveSpans: []Span{{-2, 10}}, + PositiveBuckets: []int64{1, -1, 0, 3, -3, 0, 0, 1, 42, 3}, + NegativeSpans: []Span{{0, 10}}, + NegativeBuckets: []int64{5, 3, -8, 0, 0, 4, -2, -2, 3, 4}, + }, + }, + { + "only empty buckets and maxEmptyBuckets greater zero", + &Histogram{ + PositiveSpans: []Span{{-4, 6}, {3, 3}}, + PositiveBuckets: []int64{0, 0, 0, 0, 0, 0, 0, 0, 0}, + NegativeSpans: []Span{{0, 7}}, + NegativeBuckets: []int64{0, 0, 0, 0, 0, 0, 0}, + }, + 3, + &Histogram{ + PositiveSpans: []Span{}, + PositiveBuckets: []int64{}, + NegativeSpans: []Span{}, + NegativeBuckets: []int64{}, + }, + }, + { + "multiple spans 
of only empty buckets", + &Histogram{ + PositiveSpans: []Span{{-10, 2}, {2, 1}, {3, 3}}, + PositiveBuckets: []int64{0, 0, 0, 0, 2, 3}, + NegativeSpans: []Span{{-10, 2}, {2, 1}, {3, 3}}, + NegativeBuckets: []int64{2, 3, -5, 0, 0, 0}, + }, + 0, + &Histogram{ + PositiveSpans: []Span{{-1, 2}}, + PositiveBuckets: []int64{2, 3}, + NegativeSpans: []Span{{-10, 2}}, + NegativeBuckets: []int64{2, 3}, + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + require.Equal(t, c.expected, c.in.Compact(c.maxEmptyBuckets)) + // Compact has happened in-place, too. + require.Equal(t, c.expected, c.in) + }) + } +} diff --git a/model/labels/labels.go b/model/labels/labels.go index 48237bdc0e..0c27e15c72 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -11,15 +11,18 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !stringlabels + package labels import ( "bytes" "encoding/json" - "sort" "strconv" "github.com/cespare/xxhash/v2" + "github.com/prometheus/common/model" + "golang.org/x/exp/slices" ) // Well-known label names used by Prometheus components. @@ -134,6 +137,7 @@ func (ls Labels) MatchLabels(on bool, names ...string) Labels { } // Hash returns a hash value for the label set. +// Note: the result is not guaranteed to be consistent across different runs of Prometheus. func (ls Labels) Hash() uint64 { // Use xxhash.Sum64(b) for fast path as it's faster. b := make([]byte, 0, 1024) @@ -165,11 +169,12 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { b = b[:0] i, j := 0, 0 for i < len(ls) && j < len(names) { - if names[j] < ls[i].Name { + switch { + case names[j] < ls[i].Name: j++ - } else if ls[i].Name < names[j] { + case ls[i].Name < names[j]: i++ - } else { + default: b = append(b, ls[i].Name...) b = append(b, seps[0]) b = append(b, ls[i].Value...) @@ -209,11 +214,12 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { b.WriteByte(labelSep) i, j := 0, 0 for i < len(ls) && j < len(names) { - if names[j] < ls[i].Name { + switch { + case names[j] < ls[i].Name: j++ - } else if ls[i].Name < names[j] { + case ls[i].Name < names[j]: i++ - } else { + default: if b.Len() > 1 { b.WriteByte(seps[0]) } @@ -311,6 +317,19 @@ func (ls Labels) WithoutEmpty() Labels { return ls } +// IsValid checks if the metric name or label names are valid. +func (ls Labels) IsValid() bool { + for _, l := range ls { + if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) { + return false + } + if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { + return false + } + } + return true +} + // Equal returns whether the two label sets are equal. func Equal(ls, o Labels) bool { if len(ls) != len(o) { @@ -342,10 +361,8 @@ func EmptyLabels() Labels { // The caller has to guarantee that all label names are unique. func New(ls ...Label) Labels { set := make(Labels, 0, len(ls)) - for _, l := range ls { - set = append(set, l) - } - sort.Sort(set) + set = append(set, ls...) 
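+	// Sort by name: slices.SortFunc (replacing the earlier sort.Sort) keeps the invariant that Labels are ordered by name.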
+ slices.SortFunc(set, func(a, b Label) bool { return a.Name < b.Name }) return set } @@ -369,7 +386,7 @@ func FromStrings(ss ...string) Labels { res = append(res, Label{Name: ss[i], Value: ss[i+1]}) } - sort.Sort(res) + slices.SortFunc(res, func(a, b Label) bool { return a.Name < b.Name }) return res } @@ -399,6 +416,49 @@ func Compare(a, b Labels) int { return len(a) - len(b) } +// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. +func (ls *Labels) CopyFrom(b Labels) { + (*ls) = append((*ls)[:0], b...) +} + +// IsEmpty returns true if ls represents an empty set of labels. +func (ls Labels) IsEmpty() bool { + return len(ls) == 0 +} + +// Range calls f on each label. +func (ls Labels) Range(f func(l Label)) { + for _, l := range ls { + f(l) + } +} + +// Validate calls f on each label. If f returns a non-nil error, then it returns that error cancelling the iteration. +func (ls Labels) Validate(f func(l Label) error) error { + for _, l := range ls { + if err := f(l); err != nil { + return err + } + } + return nil +} + +// InternStrings calls intern on every string value inside ls, replacing them with what it returns. +func (ls *Labels) InternStrings(intern func(string) string) { + for i, l := range *ls { + (*ls)[i].Name = intern(l.Name) + (*ls)[i].Value = intern(l.Value) + } +} + +// ReleaseStrings calls release on every string value inside ls. +func (ls Labels) ReleaseStrings(release func(string)) { + for _, l := range ls { + release(l.Name) + release(l.Value) + } +} + // Builder allows modifying Labels. type Builder struct { base Labels @@ -455,7 +515,7 @@ Outer: return b } -// Set the name/value pair as a label. +// Set the name/value pair as a label. A value of "" means delete that label. func (b *Builder) Set(n, v string) *Builder { if v == "" { // Empty labels are the same as missing labels. @@ -472,41 +532,109 @@ func (b *Builder) Set(n, v string) *Builder { return b } -// Labels returns the labels from the builder, adding them to res if non-nil. -// Argument res can be the same as b.base, if caller wants to overwrite that slice. +func (b *Builder) Get(n string) string { + // Del() removes entries from .add but Set() does not remove from .del, so check .add first. + for _, a := range b.add { + if a.Name == n { + return a.Value + } + } + if slices.Contains(b.del, n) { + return "" + } + return b.base.Get(n) +} + +// Range calls f on each label in the Builder. +func (b *Builder) Range(f func(l Label)) { + // Stack-based arrays to avoid heap allocation in most cases. + var addStack [128]Label + var delStack [128]string + // Take a copy of add and del, so they are unaffected by calls to Set() or Del(). + origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...) + b.base.Range(func(l Label) { + if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) { + f(l) + } + }) + for _, a := range origAdd { + f(a) + } +} + +func contains(s []Label, n string) bool { + for _, a := range s { + if a.Name == n { + return true + } + } + return false +} + +// Labels returns the labels from the builder. // If no modifications were made, the original labels are returned. -func (b *Builder) Labels(res Labels) Labels { +func (b *Builder) Labels() Labels { if len(b.del) == 0 && len(b.add) == 0 { return b.base } - if res == nil { - // In the general case, labels are removed, modified or moved - // rather than added. 
- res = make(Labels, 0, len(b.base)) - } else { - res = res[:0] + expectedSize := len(b.base) + len(b.add) - len(b.del) + if expectedSize < 1 { + expectedSize = 1 } -Outer: - // Justification that res can be the same slice as base: in this loop - // we move forward through base, and either skip an element or assign - // it to res at its current position or an earlier position. + res := make(Labels, 0, expectedSize) for _, l := range b.base { - for _, n := range b.del { - if l.Name == n { - continue Outer - } - } - for _, la := range b.add { - if l.Name == la.Name { - continue Outer - } + if slices.Contains(b.del, l.Name) || contains(b.add, l.Name) { + continue } res = append(res, l) } if len(b.add) > 0 { // Base is already in order, so we only need to sort if we add to it. res = append(res, b.add...) - sort.Sort(res) + slices.SortFunc(res, func(a, b Label) bool { return a.Name < b.Name }) } return res } + +// ScratchBuilder allows efficient construction of a Labels from scratch. +type ScratchBuilder struct { + add Labels +} + +// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries. +func NewScratchBuilder(n int) ScratchBuilder { + return ScratchBuilder{add: make([]Label, 0, n)} +} + +func (b *ScratchBuilder) Reset() { + b.add = b.add[:0] +} + +// Add a name/value pair. +// Note if you Add the same name twice you will get a duplicate label, which is invalid. +func (b *ScratchBuilder) Add(name, value string) { + b.add = append(b.add, Label{Name: name, Value: value}) +} + +// Sort the labels added so far by name. +func (b *ScratchBuilder) Sort() { + slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name }) +} + +// Assign is for when you already have a Labels which you want this ScratchBuilder to return. +func (b *ScratchBuilder) Assign(ls Labels) { + b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice. +} + +// Return the name/value pairs added so far as a Labels object. +// Note: if you want them sorted, call Sort() first. +func (b *ScratchBuilder) Labels() Labels { + // Copy the slice, so the next use of ScratchBuilder doesn't overwrite. + return append([]Label{}, b.add...) +} + +// Write the newly-built Labels out to ls. +// Callers must ensure that there are no other references to ls, or any strings fetched from it. +func (b *ScratchBuilder) Overwrite(ls *Labels) { + *ls = append((*ls)[:0], b.add...) +} diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go new file mode 100644 index 0000000000..223aa6ebf7 --- /dev/null +++ b/model/labels/labels_stringlabels.go @@ -0,0 +1,857 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build stringlabels + +package labels + +import ( + "bytes" + "encoding/json" + "reflect" + "strconv" + "unsafe" + + "github.com/cespare/xxhash/v2" + "github.com/prometheus/common/model" + "golang.org/x/exp/slices" +) + +// Well-known label names used by Prometheus components. 
+const (
+	MetricName   = "__name__"
+	AlertName    = "alertname"
+	BucketLabel  = "le"
+	InstanceName = "instance"
+)
+
+var seps = []byte{'\xff'}
+
+// Label is a key/value pair of strings.
+type Label struct {
+	Name, Value string
+}
+
+// Labels is implemented by a single flat string holding name/value pairs.
+// Each name and value is preceded by its length in varint encoding.
+// Names are in order.
+type Labels struct {
+	data string
+}
+
+type labelSlice []Label
+
+func (ls labelSlice) Len() int           { return len(ls) }
+func (ls labelSlice) Swap(i, j int)      { ls[i], ls[j] = ls[j], ls[i] }
+func (ls labelSlice) Less(i, j int) bool { return ls[i].Name < ls[j].Name }
+
+func decodeSize(data string, index int) (int, int) {
+	// Fast-path for the common case of a single byte, value 0..127.
+	b := data[index]
+	index++
+	if b < 0x80 {
+		return int(b), index
+	}
+	size := int(b & 0x7F)
+	for shift := uint(7); ; shift += 7 {
+		// Just panic if we go off the end of data, since all Labels strings are constructed internally and
+		// malformed data indicates a bug or memory corruption.
+		b := data[index]
+		index++
+		size |= int(b&0x7F) << shift
+		if b < 0x80 {
+			break
+		}
+	}
+	return size, index
+}
+
+func decodeString(data string, index int) (string, int) {
+	var size int
+	size, index = decodeSize(data, index)
+	return data[index : index+size], index + size
+}
+
+func (ls Labels) String() string {
+	var b bytes.Buffer
+
+	b.WriteByte('{')
+	for i := 0; i < len(ls.data); {
+		if i > 0 {
+			b.WriteByte(',')
+			b.WriteByte(' ')
+		}
+		var name, value string
+		name, i = decodeString(ls.data, i)
+		value, i = decodeString(ls.data, i)
+		b.WriteString(name)
+		b.WriteByte('=')
+		b.WriteString(strconv.Quote(value))
+	}
+	b.WriteByte('}')
+	return b.String()
+}
+
+// Bytes returns ls as a byte slice.
+// It uses non-printing characters and so should not be used for printing.
+func (ls Labels) Bytes(buf []byte) []byte {
+	if cap(buf) < len(ls.data) {
+		buf = make([]byte, len(ls.data))
+	} else {
+		buf = buf[:len(ls.data)]
+	}
+	copy(buf, ls.data)
+	return buf
+}
+
+// MarshalJSON implements json.Marshaler.
+func (ls Labels) MarshalJSON() ([]byte, error) {
+	return json.Marshal(ls.Map())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (ls *Labels) UnmarshalJSON(b []byte) error {
+	var m map[string]string
+
+	if err := json.Unmarshal(b, &m); err != nil {
+		return err
+	}
+
+	*ls = FromMap(m)
+	return nil
+}
+
+// MarshalYAML implements yaml.Marshaler.
+func (ls Labels) MarshalYAML() (interface{}, error) {
+	return ls.Map(), nil
+}
+
+// IsZero implements yaml.IsZeroer - if we don't have this then 'omitempty' fields are always omitted.
+func (ls Labels) IsZero() bool {
+	return len(ls.data) == 0
+}
+
+// UnmarshalYAML implements yaml.Unmarshaler.
+func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var m map[string]string
+
+	if err := unmarshal(&m); err != nil {
+		return err
+	}
+
+	*ls = FromMap(m)
+	return nil
+}
+
+// MatchLabels returns a subset of Labels based on the 'on' boolean: the labels
+// matching the provided names when 'on' is true, and the inverse of that
+// subset when 'on' is false.
+// TODO: this is only used when printing an error message.
+func (ls Labels) MatchLabels(on bool, names ...string) Labels {
+	b := NewBuilder(ls)
+	if on {
+		b.Keep(names...)
+	} else {
+		b.Del(MetricName)
+		b.Del(names...)
+	}
+	return b.Labels()
+}
+
+// Hash returns a hash value for the label set.
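+// With the stringlabels build, this is a single xxhash.Sum64 over the encoded data string.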
+// Note: the result is not guaranteed to be consistent across different runs of Prometheus. +func (ls Labels) Hash() uint64 { + return xxhash.Sum64(yoloBytes(ls.data)) +} + +// HashForLabels returns a hash value for the labels matching the provided names. +// 'names' have to be sorted in ascending order. +func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + j := 0 + for i := 0; i < len(ls.data); { + var name, value string + name, i = decodeString(ls.data, i) + value, i = decodeString(ls.data, i) + for j < len(names) && names[j] < name { + j++ + } + if j == len(names) { + break + } + if name == names[j] { + b = append(b, name...) + b = append(b, seps[0]) + b = append(b, value...) + b = append(b, seps[0]) + } + } + + return xxhash.Sum64(b), b +} + +// HashWithoutLabels returns a hash value for all labels except those matching +// the provided names. +// 'names' have to be sorted in ascending order. +func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + j := 0 + for i := 0; i < len(ls.data); { + var name, value string + name, i = decodeString(ls.data, i) + value, i = decodeString(ls.data, i) + for j < len(names) && names[j] < name { + j++ + } + if name == MetricName || (j < len(names) && name == names[j]) { + continue + } + b = append(b, name...) + b = append(b, seps[0]) + b = append(b, value...) + b = append(b, seps[0]) + } + return xxhash.Sum64(b), b +} + +// BytesWithLabels is just as Bytes(), but only for labels matching names. +// 'names' have to be sorted in ascending order. +func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { + b := buf[:0] + j := 0 + for pos := 0; pos < len(ls.data); { + lName, newPos := decodeString(ls.data, pos) + _, newPos = decodeString(ls.data, newPos) + for j < len(names) && names[j] < lName { + j++ + } + if j == len(names) { + break + } + if lName == names[j] { + b = append(b, ls.data[pos:newPos]...) + } + pos = newPos + } + return b +} + +// BytesWithoutLabels is just as Bytes(), but only for labels not matching names. +// 'names' have to be sorted in ascending order. +func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { + b := buf[:0] + j := 0 + for pos := 0; pos < len(ls.data); { + lName, newPos := decodeString(ls.data, pos) + _, newPos = decodeString(ls.data, newPos) + for j < len(names) && names[j] < lName { + j++ + } + if j == len(names) || lName != names[j] { + b = append(b, ls.data[pos:newPos]...) + } + pos = newPos + } + return b +} + +// Copy returns a copy of the labels. +func (ls Labels) Copy() Labels { + buf := append([]byte{}, ls.data...) + return Labels{data: yoloString(buf)} +} + +// Get returns the value for the label with the given name. +// Returns an empty string if the label doesn't exist. +func (ls Labels) Get(name string) string { + if name == "" { // Avoid crash in loop if someone asks for "". + return "" // Prometheus does not store blank label names. + } + for i := 0; i < len(ls.data); { + var size int + size, i = decodeSize(ls.data, i) + if ls.data[i] == name[0] { + lName := ls.data[i : i+size] + i += size + if lName == name { + lValue, _ := decodeString(ls.data, i) + return lValue + } + } else { + if ls.data[i] > name[0] { // Stop looking if we've gone past. + break + } + i += size + } + size, i = decodeSize(ls.data, i) + i += size + } + return "" +} + +// Has returns true if the label with the given name is present. 
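+// It scans the encoded name/value pairs linearly.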
+func (ls Labels) Has(name string) bool { + for i := 0; i < len(ls.data); { + var lName string + lName, i = decodeString(ls.data, i) + _, i = decodeString(ls.data, i) + if lName == name { + return true + } + } + return false +} + +// HasDuplicateLabelNames returns whether ls has duplicate label names. +// It assumes that the labelset is sorted. +func (ls Labels) HasDuplicateLabelNames() (string, bool) { + var lName, prevName string + for i := 0; i < len(ls.data); { + lName, i = decodeString(ls.data, i) + _, i = decodeString(ls.data, i) + if lName == prevName { + return lName, true + } + prevName = lName + } + return "", false +} + +// WithoutEmpty returns the labelset without empty labels. +// May return the same labelset. +func (ls Labels) WithoutEmpty() Labels { + for pos := 0; pos < len(ls.data); { + _, newPos := decodeString(ls.data, pos) + lValue, newPos := decodeString(ls.data, newPos) + if lValue != "" { + pos = newPos + continue + } + // Do not copy the slice until it's necessary. + // TODO: could optimise the case where all blanks are at the end. + // Note: we size the new buffer on the assumption there is exactly one blank value. + buf := make([]byte, pos, pos+(len(ls.data)-newPos)) + copy(buf, ls.data[:pos]) // copy the initial non-blank labels + pos = newPos // move past the first blank value + for pos < len(ls.data) { + var newPos int + _, newPos = decodeString(ls.data, pos) + lValue, newPos = decodeString(ls.data, newPos) + if lValue != "" { + buf = append(buf, ls.data[pos:newPos]...) + } + pos = newPos + } + return Labels{data: yoloString(buf)} + } + return ls +} + +// IsValid checks if the metric name or label names are valid. +func (ls Labels) IsValid() bool { + err := ls.Validate(func(l Label) error { + if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) { + return strconv.ErrSyntax + } + if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { + return strconv.ErrSyntax + } + return nil + }) + return err == nil +} + +// Equal returns whether the two label sets are equal. +func Equal(ls, o Labels) bool { + return ls.data == o.data +} + +// Map returns a string map of the labels. +func (ls Labels) Map() map[string]string { + m := make(map[string]string, len(ls.data)/10) + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.data, i) + lValue, i = decodeString(ls.data, i) + m[lName] = lValue + } + return m +} + +// EmptyLabels returns an empty Labels value, for convenience. +func EmptyLabels() Labels { + return Labels{} +} + +func yoloString(b []byte) string { + return *((*string)(unsafe.Pointer(&b))) +} + +func yoloBytes(s string) (b []byte) { + *(*string)(unsafe.Pointer(&b)) = s + (*reflect.SliceHeader)(unsafe.Pointer(&b)).Cap = len(s) + return +} + +// New returns a sorted Labels from the given labels. +// The caller has to guarantee that all label names are unique. +func New(ls ...Label) Labels { + slices.SortFunc(ls, func(a, b Label) bool { return a.Name < b.Name }) + size := labelsSize(ls) + buf := make([]byte, size) + marshalLabelsToSizedBuffer(ls, buf) + return Labels{data: yoloString(buf)} +} + +// FromMap returns new sorted Labels from the given map. +func FromMap(m map[string]string) Labels { + l := make([]Label, 0, len(m)) + for k, v := range m { + l = append(l, Label{Name: k, Value: v}) + } + return New(l...) +} + +// FromStrings creates new labels from pairs of strings. 
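+// It panics if given an odd number of strings.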
+func FromStrings(ss ...string) Labels { + if len(ss)%2 != 0 { + panic("invalid number of strings") + } + ls := make([]Label, 0, len(ss)/2) + for i := 0; i < len(ss); i += 2 { + ls = append(ls, Label{Name: ss[i], Value: ss[i+1]}) + } + + return New(ls...) +} + +// Compare compares the two label sets. +// The result will be 0 if a==b, <0 if a < b, and >0 if a > b. +func Compare(a, b Labels) int { + // Find the first byte in the string where a and b differ. + shorter, longer := a.data, b.data + if len(b.data) < len(a.data) { + shorter, longer = b.data, a.data + } + i := 0 + // First, go 8 bytes at a time. Data strings are expected to be 8-byte aligned. + sp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&shorter)).Data) + lp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&longer)).Data) + for ; i < len(shorter)-8; i += 8 { + if *(*uint64)(unsafe.Add(sp, i)) != *(*uint64)(unsafe.Add(lp, i)) { + break + } + } + // Now go 1 byte at a time. + for ; i < len(shorter); i++ { + if shorter[i] != longer[i] { + break + } + } + if i == len(shorter) { + // One Labels was a prefix of the other; the set with fewer labels compares lower. + return len(a.data) - len(b.data) + } + + // Now we know that there is some difference before the end of a and b. + // Go back through the fields and find which field that difference is in. + firstCharDifferent := i + for i = 0; ; { + size, nextI := decodeSize(a.data, i) + if nextI+size > firstCharDifferent { + break + } + i = nextI + size + } + // Difference is inside this entry. + aStr, _ := decodeString(a.data, i) + bStr, _ := decodeString(b.data, i) + if aStr < bStr { + return -1 + } + return +1 +} + +// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. +func (ls *Labels) CopyFrom(b Labels) { + ls.data = b.data // strings are immutable +} + +// IsEmpty returns true if ls represents an empty set of labels. +func (ls Labels) IsEmpty() bool { + return len(ls.data) == 0 +} + +// Len returns the number of labels; it is relatively slow. +func (ls Labels) Len() int { + count := 0 + for i := 0; i < len(ls.data); { + var size int + size, i = decodeSize(ls.data, i) + i += size + size, i = decodeSize(ls.data, i) + i += size + count++ + } + return count +} + +// Range calls f on each label. +func (ls Labels) Range(f func(l Label)) { + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.data, i) + lValue, i = decodeString(ls.data, i) + f(Label{Name: lName, Value: lValue}) + } +} + +// Validate calls f on each label. If f returns a non-nil error, then it returns that error cancelling the iteration. +func (ls Labels) Validate(f func(l Label) error) error { + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.data, i) + lValue, i = decodeString(ls.data, i) + err := f(Label{Name: lName, Value: lValue}) + if err != nil { + return err + } + } + return nil +} + +// InternStrings calls intern on every string value inside ls, replacing them with what it returns. +func (ls *Labels) InternStrings(intern func(string) string) { + ls.data = intern(ls.data) +} + +// ReleaseStrings calls release on every string value inside ls. +func (ls Labels) ReleaseStrings(release func(string)) { + release(ls.data) +} + +// Builder allows modifying Labels. +type Builder struct { + base Labels + del []string + add []Label +} + +// NewBuilder returns a new LabelsBuilder. 
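+// Base labels that carry an empty value are marked for deletion by Reset.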
+func NewBuilder(base Labels) *Builder { + b := &Builder{ + del: make([]string, 0, 5), + add: make([]Label, 0, 5), + } + b.Reset(base) + return b +} + +// Reset clears all current state for the builder. +func (b *Builder) Reset(base Labels) { + b.base = base + b.del = b.del[:0] + b.add = b.add[:0] + for i := 0; i < len(base.data); { + var lName, lValue string + lName, i = decodeString(base.data, i) + lValue, i = decodeString(base.data, i) + if lValue == "" { + b.del = append(b.del, lName) + } + } +} + +// Del deletes the label of the given name. +func (b *Builder) Del(ns ...string) *Builder { + for _, n := range ns { + for i, a := range b.add { + if a.Name == n { + b.add = append(b.add[:i], b.add[i+1:]...) + } + } + b.del = append(b.del, n) + } + return b +} + +// Keep removes all labels from the base except those with the given names. +func (b *Builder) Keep(ns ...string) *Builder { +Outer: + for i := 0; i < len(b.base.data); { + var lName string + lName, i = decodeString(b.base.data, i) + _, i = decodeString(b.base.data, i) + for _, n := range ns { + if lName == n { + continue Outer + } + } + b.del = append(b.del, lName) + } + return b +} + +// Set the name/value pair as a label. A value of "" means delete that label. +func (b *Builder) Set(n, v string) *Builder { + if v == "" { + // Empty labels are the same as missing labels. + return b.Del(n) + } + for i, a := range b.add { + if a.Name == n { + b.add[i].Value = v + return b + } + } + b.add = append(b.add, Label{Name: n, Value: v}) + + return b +} + +func (b *Builder) Get(n string) string { + // Del() removes entries from .add but Set() does not remove from .del, so check .add first. + for _, a := range b.add { + if a.Name == n { + return a.Value + } + } + if slices.Contains(b.del, n) { + return "" + } + return b.base.Get(n) +} + +// Range calls f on each label in the Builder. +func (b *Builder) Range(f func(l Label)) { + // Stack-based arrays to avoid heap allocation in most cases. + var addStack [128]Label + var delStack [128]string + // Take a copy of add and del, so they are unaffected by calls to Set() or Del(). + origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...) + b.base.Range(func(l Label) { + if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) { + f(l) + } + }) + for _, a := range origAdd { + f(a) + } +} + +func contains(s []Label, n string) bool { + for _, a := range s { + if a.Name == n { + return true + } + } + return false +} + +// Labels returns the labels from the builder. +// If no modifications were made, the original labels are returned. +func (b *Builder) Labels() Labels { + if len(b.del) == 0 && len(b.add) == 0 { + return b.base + } + + slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name }) + slices.Sort(b.del) + a, d := 0, 0 + + bufSize := len(b.base.data) + labelsSize(b.add) + buf := make([]byte, 0, bufSize) + for pos := 0; pos < len(b.base.data); { + oldPos := pos + var lName string + lName, pos = decodeString(b.base.data, pos) + _, pos = decodeString(b.base.data, pos) + for d < len(b.del) && b.del[d] < lName { + d++ + } + if d < len(b.del) && b.del[d] == lName { + continue // This label has been deleted. + } + for ; a < len(b.add) && b.add[a].Name < lName; a++ { + buf = appendLabelTo(buf, &b.add[a]) // Insert label that was not in the base set. + } + if a < len(b.add) && b.add[a].Name == lName { + buf = appendLabelTo(buf, &b.add[a]) + a++ + continue // This label has been replaced. + } + buf = append(buf, b.base.data[oldPos:pos]...) 
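+		// Reaching here means the base label was neither deleted nor replaced; its encoded bytes were copied through unchanged.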
+ } + // We have come to the end of the base set; add any remaining labels. + for ; a < len(b.add); a++ { + buf = appendLabelTo(buf, &b.add[a]) + } + return Labels{data: yoloString(buf)} +} + +func marshalLabelsToSizedBuffer(lbls []Label, data []byte) int { + i := len(data) + for index := len(lbls) - 1; index >= 0; index-- { + size := marshalLabelToSizedBuffer(&lbls[index], data[:i]) + i -= size + } + return len(data) - i +} + +func marshalLabelToSizedBuffer(m *Label, data []byte) int { + i := len(data) + i -= len(m.Value) + copy(data[i:], m.Value) + i = encodeSize(data, i, len(m.Value)) + i -= len(m.Name) + copy(data[i:], m.Name) + i = encodeSize(data, i, len(m.Name)) + return len(data) - i +} + +func sizeVarint(x uint64) (n int) { + // Most common case first + if x < 1<<7 { + return 1 + } + if x >= 1<<56 { + return 9 + } + if x >= 1<<28 { + x >>= 28 + n = 4 + } + if x >= 1<<14 { + x >>= 14 + n += 2 + } + if x >= 1<<7 { + n++ + } + return n + 1 +} + +func encodeVarint(data []byte, offset int, v uint64) int { + offset -= sizeVarint(v) + base := offset + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return base +} + +// Special code for the common case that a size is less than 128 +func encodeSize(data []byte, offset, v int) int { + if v < 1<<7 { + offset-- + data[offset] = uint8(v) + return offset + } + return encodeVarint(data, offset, uint64(v)) +} + +func labelsSize(lbls []Label) (n int) { + // we just encode name/value/name/value, without any extra tags or length bytes + for _, e := range lbls { + n += labelSize(&e) + } + return n +} + +func labelSize(m *Label) (n int) { + // strings are encoded as length followed by contents. + l := len(m.Name) + n += l + sizeVarint(uint64(l)) + l = len(m.Value) + n += l + sizeVarint(uint64(l)) + return n +} + +func appendLabelTo(buf []byte, m *Label) []byte { + size := labelSize(m) + sizeRequired := len(buf) + size + if cap(buf) >= sizeRequired { + buf = buf[:sizeRequired] + } else { + bufSize := cap(buf) + // Double size of buffer each time it needs to grow, to amortise copying cost. + for bufSize < sizeRequired { + bufSize = bufSize*2 + 1 + } + newBuf := make([]byte, sizeRequired, bufSize) + copy(newBuf, buf) + buf = newBuf + } + marshalLabelToSizedBuffer(m, buf) + return buf +} + +// ScratchBuilder allows efficient construction of a Labels from scratch. +type ScratchBuilder struct { + add []Label + output Labels + overwriteBuffer []byte +} + +// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries. +func NewScratchBuilder(n int) ScratchBuilder { + return ScratchBuilder{add: make([]Label, 0, n)} +} + +func (b *ScratchBuilder) Reset() { + b.add = b.add[:0] + b.output = EmptyLabels() +} + +// Add a name/value pair. +// Note if you Add the same name twice you will get a duplicate label, which is invalid. +func (b *ScratchBuilder) Add(name, value string) { + b.add = append(b.add, Label{Name: name, Value: value}) +} + +// Sort the labels added so far by name. +func (b *ScratchBuilder) Sort() { + slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name }) +} + +// Assign is for when you already have a Labels which you want this ScratchBuilder to return. +func (b *ScratchBuilder) Assign(l Labels) { + b.output = l +} + +// Labels returns the name/value pairs added as a Labels object. Calling Add() after Labels() has no effect. +// Note: if you want them sorted, call Sort() first. 
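+// The encoded result is built on first call and cached in b.output.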
+func (b *ScratchBuilder) Labels() Labels { + if b.output.IsEmpty() { + size := labelsSize(b.add) + buf := make([]byte, size) + marshalLabelsToSizedBuffer(b.add, buf) + b.output = Labels{data: yoloString(buf)} + } + return b.output +} + +// Write the newly-built Labels out to ls, reusing an internal buffer. +// Callers must ensure that there are no other references to ls, or any strings fetched from it. +func (b *ScratchBuilder) Overwrite(ls *Labels) { + size := labelsSize(b.add) + if size <= cap(b.overwriteBuffer) { + b.overwriteBuffer = b.overwriteBuffer[:size] + } else { + b.overwriteBuffer = make([]byte, size) + } + marshalLabelsToSizedBuffer(b.add, b.overwriteBuffer) + ls.data = yoloString(b.overwriteBuffer) +} diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index 57ccf1fefa..d91be27cbc 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -36,10 +36,6 @@ func TestLabels_String(t *testing.T) { lables: Labels{}, expected: "{}", }, - { - lables: nil, - expected: "{}", - }, } for _, c := range cases { str := c.lables.String() @@ -216,6 +212,62 @@ func TestLabels_WithoutEmpty(t *testing.T) { } } +func TestLabels_IsValid(t *testing.T) { + for _, test := range []struct { + input Labels + expected bool + }{ + { + input: FromStrings( + "__name__", "test", + "hostname", "localhost", + "job", "check", + ), + expected: true, + }, + { + input: FromStrings( + "__name__", "test:ms", + "hostname_123", "localhost", + "_job", "check", + ), + expected: true, + }, + { + input: FromStrings("__name__", "test-ms"), + expected: false, + }, + { + input: FromStrings("__name__", "0zz"), + expected: false, + }, + { + input: FromStrings("abc:xyz", "invalid"), + expected: false, + }, + { + input: FromStrings("123abc", "invalid"), + expected: false, + }, + { + input: FromStrings("中文abc", "invalid"), + expected: false, + }, + { + input: FromStrings("invalid", "aa\xe2"), + expected: false, + }, + { + input: FromStrings("invalid", "\xF7\xBF\xBF\xBF"), + expected: false, + }, + } { + t.Run("", func(t *testing.T) { + require.Equal(t, test.expected, test.input.IsValid()) + }) + } +} + func TestLabels_Equal(t *testing.T) { labels := FromStrings( "aaa", "111", @@ -260,18 +312,18 @@ func TestLabels_Equal(t *testing.T) { func TestLabels_FromStrings(t *testing.T) { labels := FromStrings("aaa", "111", "bbb", "222") - expected := Labels{ - { - Name: "aaa", - Value: "111", - }, - { - Name: "bbb", - Value: "222", - }, - } - - require.Equal(t, expected, labels, "unexpected labelset") + x := 0 + labels.Range(func(l Label) { + switch x { + case 0: + require.Equal(t, Label{Name: "aaa", Value: "111"}, l, "unexpected value") + case 1: + require.Equal(t, Label{Name: "bbb", Value: "222"}, l, "unexpected value") + default: + t.Fatalf("unexpected labelset value %d: %v", x, l) + } + x++ + }) require.Panics(t, func() { FromStrings("aaa", "111", "bbb") }) //nolint:staticcheck // Ignore SA5012, error is intentional test. 
} @@ -309,6 +361,18 @@ func TestLabels_Compare(t *testing.T) { "bbc", "222"), expected: -1, }, + { + compared: FromStrings( + "aaa", "111", + "bb", "222"), + expected: 1, + }, + { + compared: FromStrings( + "aaa", "111", + "bbbb", "222"), + expected: -1, + }, { compared: FromStrings( "aaa", "111"), @@ -328,6 +392,10 @@ func TestLabels_Compare(t *testing.T) { "bbb", "222"), expected: 0, }, + { + compared: EmptyLabels(), + expected: 1, + }, } sign := func(a int) int { @@ -343,6 +411,8 @@ func TestLabels_Compare(t *testing.T) { for i, test := range tests { got := Compare(labels, test.compared) require.Equal(t, sign(test.expected), sign(got), "unexpected comparison result for test case %d", i) + got = Compare(test.compared, labels) + require.Equal(t, -sign(test.expected), sign(got), "unexpected comparison result for reverse test case %d", i) } } @@ -373,7 +443,8 @@ func TestLabels_Has(t *testing.T) { func TestLabels_Get(t *testing.T) { require.Equal(t, "", FromStrings("aaa", "111", "bbb", "222").Get("foo")) - require.Equal(t, "111", FromStrings("aaa", "111", "bbb", "222").Get("aaa")) + require.Equal(t, "111", FromStrings("aaaa", "111", "bbb", "222").Get("aaaa")) + require.Equal(t, "222", FromStrings("aaaa", "111", "bbb", "222").Get("bbb")) } // BenchmarkLabels_Get was written to check whether a binary search can improve the performance vs the linear search implementation @@ -393,7 +464,7 @@ func BenchmarkLabels_Get(b *testing.B) { maxLabels := 30 allLabels := make([]Label, maxLabels) for i := 0; i < maxLabels; i++ { - allLabels[i] = Label{Name: strings.Repeat(string('a'+byte(i)), 5)} + allLabels[i] = Label{Name: strings.Repeat(string('a'+byte(i)), 5+(i%5))} } for _, size := range []int{5, 10, maxLabels} { b.Run(fmt.Sprintf("with %d labels", size), func(b *testing.B) { @@ -404,6 +475,7 @@ func BenchmarkLabels_Get(b *testing.B) { {"get first label", allLabels[0].Name}, {"get middle label", allLabels[size/2].Name}, {"get last label", allLabels[size-1].Name}, + {"get not-found label", "benchmark"}, } { b.Run(scenario.desc, func(b *testing.B) { b.ResetTimer() @@ -416,27 +488,34 @@ func BenchmarkLabels_Get(b *testing.B) { } } +var comparisonBenchmarkScenarios = []struct { + desc string + base, other Labels +}{ + { + "equal", + FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + }, + { + "not equal", + FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + FromStrings("a_label_name", "a_label_value", "another_label_name", "a_different_label_value"), + }, + { + "different sizes", + FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), + FromStrings("a_label_name", "a_label_value"), + }, + { + "lots", + FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrz"), + FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrr"), + }, +} + func BenchmarkLabels_Equals(b *testing.B) { - for _, scenario := range []struct { - desc string - base, other Labels - }{ - { - "equal", - FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), - FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), - }, - { - "not equal", - FromStrings("a_label_name", "a_label_value", "another_label_name", 
"another_label_value"), - FromStrings("a_label_name", "a_label_value", "another_label_name", "a_different_label_value"), - }, - { - "different sizes", - FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"), - FromStrings("a_label_name", "a_label_value"), - }, - } { + for _, scenario := range comparisonBenchmarkScenarios { b.Run(scenario.desc, func(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { @@ -446,6 +525,17 @@ func BenchmarkLabels_Equals(b *testing.B) { } } +func BenchmarkLabels_Compare(b *testing.B) { + for _, scenario := range comparisonBenchmarkScenarios { + b.Run(scenario.desc, func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = Compare(scenario.base, scenario.other) + } + }) + } +} + func TestLabels_Copy(t *testing.T) { require.Equal(t, FromStrings("aaa", "111", "bbb", "222"), FromStrings("aaa", "111", "bbb", "222").Copy()) } @@ -477,13 +567,17 @@ func TestBuilder(t *testing.T) { base: FromStrings("aaa", "111"), want: FromStrings("aaa", "111"), }, + { + base: FromStrings("aaa", "111", "bbb", "222", "ccc", "333"), + set: []Label{{"aaa", "444"}, {"bbb", "555"}, {"ccc", "666"}}, + want: FromStrings("aaa", "444", "bbb", "555", "ccc", "666"), + }, { base: FromStrings("aaa", "111", "bbb", "222", "ccc", "333"), del: []string{"bbb"}, want: FromStrings("aaa", "111", "ccc", "333"), }, { - base: nil, set: []Label{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}}, del: []string{"bbb"}, want: FromStrings("aaa", "111", "ccc", "333"), @@ -540,7 +634,61 @@ func TestBuilder(t *testing.T) { b.Keep(tcase.keep...) } b.Del(tcase.del...) - require.Equal(t, tcase.want, b.Labels(tcase.base)) + require.Equal(t, tcase.want, b.Labels()) + + // Check what happens when we call Range and mutate the builder. 
+ b.Range(func(l Label) { + if l.Name == "aaa" || l.Name == "bbb" { + b.Del(l.Name) + } + }) + require.Equal(t, tcase.want.BytesWithoutLabels(nil, "aaa", "bbb"), b.Labels().Bytes(nil)) + }) + } + t.Run("set_after_del", func(t *testing.T) { + b := NewBuilder(FromStrings("aaa", "111")) + b.Del("bbb") + b.Set("bbb", "222") + require.Equal(t, FromStrings("aaa", "111", "bbb", "222"), b.Labels()) + require.Equal(t, "222", b.Get("bbb")) + }) +} + +func TestScratchBuilder(t *testing.T) { + for i, tcase := range []struct { + add []Label + want Labels + }{ + { + add: []Label{}, + want: EmptyLabels(), + }, + { + add: []Label{{"aaa", "111"}}, + want: FromStrings("aaa", "111"), + }, + { + add: []Label{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}}, + want: FromStrings("aaa", "111", "bbb", "222", "ccc", "333"), + }, + { + add: []Label{{"bbb", "222"}, {"aaa", "111"}, {"ccc", "333"}}, + want: FromStrings("aaa", "111", "bbb", "222", "ccc", "333"), + }, + { + add: []Label{{"ddd", "444"}}, + want: FromStrings("ddd", "444"), + }, + } { + t.Run(fmt.Sprint(i), func(t *testing.T) { + b := ScratchBuilder{} + for _, lbl := range tcase.add { + b.Add(lbl.Name, lbl.Value) + } + b.Sort() + require.Equal(t, tcase.want, b.Labels()) + b.Assign(tcase.want) + require.Equal(t, tcase.want, b.Labels()) }) } } @@ -548,8 +696,7 @@ func TestBuilder(t *testing.T) { func TestLabels_Hash(t *testing.T) { lbls := FromStrings("foo", "bar", "baz", "qux") require.Equal(t, lbls.Hash(), lbls.Hash()) - require.NotEqual(t, lbls.Hash(), Labels{lbls[1], lbls[0]}.Hash(), "unordered labels match.") - require.NotEqual(t, lbls.Hash(), Labels{lbls[0]}.Hash(), "different labels match.") + require.NotEqual(t, lbls.Hash(), FromStrings("foo", "bar").Hash(), "different labels match.") } var benchmarkLabelsResult uint64 @@ -567,7 +714,7 @@ func BenchmarkLabels_Hash(b *testing.B) { // Label ~20B name, 50B value. b.Set(fmt.Sprintf("abcdefghijabcdefghijabcdefghij%d", i), fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)) } - return b.Labels(nil) + return b.Labels() }(), }, { @@ -578,7 +725,7 @@ func BenchmarkLabels_Hash(b *testing.B) { // Label ~50B name, 50B value. 
b.Set(fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i), fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)) } - return b.Labels(nil) + return b.Labels() }(), }, { @@ -607,6 +754,50 @@ func BenchmarkLabels_Hash(b *testing.B) { } } +func BenchmarkBuilder(b *testing.B) { + m := []Label{ + {"job", "node"}, + {"instance", "123.123.1.211:9090"}, + {"path", "/api/v1/namespaces//deployments/"}, + {"method", "GET"}, + {"namespace", "system"}, + {"status", "500"}, + {"prometheus", "prometheus-core-1"}, + {"datacenter", "eu-west-1"}, + {"pod_name", "abcdef-99999-defee"}, + } + + var l Labels + builder := NewBuilder(EmptyLabels()) + for i := 0; i < b.N; i++ { + builder.Reset(EmptyLabels()) + for _, l := range m { + builder.Set(l.Name, l.Value) + } + l = builder.Labels() + } + require.Equal(b, 9, l.Len()) +} + +func BenchmarkLabels_Copy(b *testing.B) { + m := map[string]string{ + "job": "node", + "instance": "123.123.1.211:9090", + "path": "/api/v1/namespaces//deployments/", + "method": "GET", + "namespace": "system", + "status": "500", + "prometheus": "prometheus-core-1", + "datacenter": "eu-west-1", + "pod_name": "abcdef-99999-defee", + } + l := FromMap(m) + + for i := 0; i < b.N; i++ { + l = l.Copy() + } +} + func TestMarshaling(t *testing.T) { lbls := FromStrings("aaa", "111", "bbb", "2222", "ccc", "33333") expectedJSON := "{\"aaa\":\"111\",\"bbb\":\"2222\",\"ccc\":\"33333\"}" diff --git a/model/labels/matcher_test.go b/model/labels/matcher_test.go index 14615a50d1..d26e9329f2 100644 --- a/model/labels/matcher_test.go +++ b/model/labels/matcher_test.go @@ -102,12 +102,12 @@ func TestInverse(t *testing.T) { expected: &Matcher{Type: MatchEqual, Name: "name2", Value: "value2"}, }, { - matcher: &Matcher{Type: MatchRegexp, Name: "name3", Value: "value3"}, - expected: &Matcher{Type: MatchNotRegexp, Name: "name3", Value: "value3"}, + matcher: &Matcher{Type: MatchRegexp, Name: "name3", Value: "value3.*"}, + expected: &Matcher{Type: MatchNotRegexp, Name: "name3", Value: "value3.*"}, }, { - matcher: &Matcher{Type: MatchNotRegexp, Name: "name4", Value: "value4"}, - expected: &Matcher{Type: MatchRegexp, Name: "name4", Value: "value4"}, + matcher: &Matcher{Type: MatchNotRegexp, Name: "name4", Value: "value4.*"}, + expected: &Matcher{Type: MatchRegexp, Name: "name4", Value: "value4.*"}, }, } @@ -123,3 +123,13 @@ func BenchmarkMatchType_String(b *testing.B) { _ = MatchType(i % int(MatchNotRegexp+1)).String() } } + +func BenchmarkNewMatcher(b *testing.B) { + b.Run("regex matcher with literal", func(b *testing.B) { + b.ReportAllocs() + b.ResetTimer() + for i := 0; i <= b.N; i++ { + NewMatcher(MatchRegexp, "foo", "bar") + } + }) +} diff --git a/model/labels/regexp.go b/model/labels/regexp.go index e09a63772f..14319c7f7a 100644 --- a/model/labels/regexp.go +++ b/model/labels/regexp.go @@ -25,9 +25,16 @@ type FastRegexMatcher struct { prefix string suffix string contains string + + // shortcut for literals + literal bool + value string } func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { + if isLiteral(v) { + return &FastRegexMatcher{literal: true, value: v}, nil + } re, err := regexp.Compile("^(?:" + v + ")$") if err != nil { return nil, err @@ -50,6 +57,9 @@ func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { } func (m *FastRegexMatcher) MatchString(s string) bool { + if m.literal { + return s == m.value + } if m.prefix != "" && !strings.HasPrefix(s, m.prefix) { return false } @@ -63,9 +73,16 @@ func (m *FastRegexMatcher) MatchString(s string) bool { 
} func (m *FastRegexMatcher) GetRegexString() string { + if m.literal { + return m.value + } return m.re.String() } +func isLiteral(re string) bool { + return regexp.QuoteMeta(re) == re +} + // optimizeConcatRegex returns literal prefix/suffix text that can be safely // checked against the label value before running the regexp matcher. func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) { diff --git a/model/labels/test_utils.go b/model/labels/test_utils.go index a683588d16..05b8168825 100644 --- a/model/labels/test_utils.go +++ b/model/labels/test_utils.go @@ -17,7 +17,6 @@ import ( "bufio" "fmt" "os" - "sort" "strings" ) @@ -51,13 +50,14 @@ func ReadLabels(fn string, n int) ([]Labels, error) { defer f.Close() scanner := bufio.NewScanner(f) + b := ScratchBuilder{} var mets []Labels hashes := map[uint64]struct{}{} i := 0 for scanner.Scan() && i < n { - m := make(Labels, 0, 10) + b.Reset() r := strings.NewReplacer("\"", "", "{", "", "}", "") s := r.Replace(scanner.Text()) @@ -65,10 +65,11 @@ func ReadLabels(fn string, n int) ([]Labels, error) { labelChunks := strings.Split(s, ",") for _, labelChunk := range labelChunks { split := strings.Split(labelChunk, ":") - m = append(m, Label{Name: split[0], Value: split[1]}) + b.Add(split[0], split[1]) } // Order of the k/v labels matters, don't assume we'll always receive them already sorted. - sort.Sort(m) + b.Sort() + m := b.Labels() h := m.Hash() if _, ok := hashes[h]; ok { diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index b4bb999f75..fadf35b867 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -15,6 +15,7 @@ package relabel import ( "crypto/md5" + "encoding/binary" "fmt" "strings" @@ -45,6 +46,10 @@ const ( Keep Action = "keep" // Drop drops targets for which the input does match the regex. Drop Action = "drop" + // KeepEqual drops targets for which the input does not match the target. + KeepEqual Action = "keepequal" + // DropEqual drops targets for which the input does match the target. + DropEqual Action = "dropequal" // HashMod sets a label to the modulus of a hash of labels. HashMod Action = "hashmod" // LabelMap copies labels to other labelnames based on a regex.
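For context on the two new actions above: `keepequal` keeps a target only when the joined source labels exactly equal the current value of `target_label`, while `dropequal` drops it in that case. Below is a minimal sketch of driving them through the package API; constructing `Config` directly in Go (bypassing the YAML defaults) follows the pattern of the test cases added later in this diff, and the printed output is an assumption based on those tests:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"
)

func main() {
	lbls := labels.FromStrings("__tmp_port", "1234", "__port1", "1234")

	// keepequal: drop the target unless the joined source labels equal
	// the current value of target_label.
	cfg := &relabel.Config{
		SourceLabels: model.LabelNames{"__tmp_port"},
		Action:       relabel.KeepEqual,
		TargetLabel:  "__port1",
	}

	// Process now reports a drop via the second return value rather than
	// by returning nil labels (see the signature change below).
	out, keep := relabel.Process(lbls, cfg)
	fmt.Println(out, keep) // {__port1="1234", __tmp_port="1234"} true
}
```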
@@ -66,7 +71,7 @@ func (a *Action) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } switch act := Action(strings.ToLower(s)); act { - case Replace, Keep, Drop, HashMod, LabelMap, LabelDrop, LabelKeep, Lowercase, Uppercase: + case Replace, Keep, Drop, HashMod, LabelMap, LabelDrop, LabelKeep, Lowercase, Uppercase, KeepEqual, DropEqual: *a = act return nil } @@ -109,13 +114,13 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { if c.Modulus == 0 && c.Action == HashMod { return fmt.Errorf("relabel configuration for hashmod requires non-zero modulus") } - if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase) && c.TargetLabel == "" { + if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.TargetLabel == "" { return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action) } - if (c.Action == Replace || c.Action == Lowercase || c.Action == Uppercase) && !relabelTarget.MatchString(c.TargetLabel) { + if (c.Action == Replace || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && !relabelTarget.MatchString(c.TargetLabel) { return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) } - if (c.Action == Lowercase || c.Action == Uppercase) && c.Replacement != DefaultRelabelConfig.Replacement { + if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.Replacement != DefaultRelabelConfig.Replacement { return fmt.Errorf("'replacement' can not be set for %s action", c.Action) } if c.Action == LabelMap && !relabelTarget.MatchString(c.Replacement) { @@ -125,6 +130,15 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) } + if c.Action == DropEqual || c.Action == KeepEqual { + if c.Regex != DefaultRelabelConfig.Regex || + c.Modulus != DefaultRelabelConfig.Modulus || + c.Separator != DefaultRelabelConfig.Separator || + c.Replacement != DefaultRelabelConfig.Replacement { + return fmt.Errorf("%s action requires only 'source_labels' and 'target_label', and no other fields", c.Action) + } + } + if c.Action == LabelDrop || c.Action == LabelKeep { if c.SourceLabels != nil || c.TargetLabel != DefaultRelabelConfig.TargetLabel || @@ -193,39 +207,54 @@ func (re Regexp) String() string { // There are circumstances where Process will modify the input label. // If you want to avoid issues with the input label set being modified, at the cost of // higher memory usage, you can use lbls.Copy(). -// If a label set is dropped, nil is returned. -func Process(lbls labels.Labels, cfgs ...*Config) labels.Labels { - lb := labels.NewBuilder(nil) - for _, cfg := range cfgs { - lbls = relabel(lbls, cfg, lb) - if lbls == nil { - return nil - } +// If a label set is dropped, EmptyLabels and false are returned. +func Process(lbls labels.Labels, cfgs ...*Config) (ret labels.Labels, keep bool) { + lb := labels.NewBuilder(lbls) + if !ProcessBuilder(lb, cfgs...) { + return labels.EmptyLabels(), false } - return lbls + return lb.Labels(), true } -func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) labels.Labels { +// ProcessBuilder is like Process, but the caller passes a labels.Builder +// containing the initial set of labels, which is mutated by the rules.
+func ProcessBuilder(lb *labels.Builder, cfgs ...*Config) (keep bool) { + for _, cfg := range cfgs { + keep = relabel(cfg, lb) + if !keep { + return false + } + } + return true +} + +func relabel(cfg *Config, lb *labels.Builder) (keep bool) { var va [16]string values := va[:0] if len(cfg.SourceLabels) > cap(values) { values = make([]string, 0, len(cfg.SourceLabels)) } for _, ln := range cfg.SourceLabels { - values = append(values, lset.Get(string(ln))) + values = append(values, lb.Get(string(ln))) } val := strings.Join(values, cfg.Separator) - lb.Reset(lset) - switch cfg.Action { case Drop: if cfg.Regex.MatchString(val) { - return nil + return false } case Keep: if !cfg.Regex.MatchString(val) { - return nil + return false + } + case DropEqual: + if lb.Get(cfg.TargetLabel) == val { + return false + } + case KeepEqual: + if lb.Get(cfg.TargetLabel) != val { + return false } case Replace: indexes := cfg.Regex.FindStringSubmatchIndex(val) @@ -249,42 +278,32 @@ func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) labels.Labels case Uppercase: lb.Set(cfg.TargetLabel, strings.ToUpper(val)) case HashMod: - mod := sum64(md5.Sum([]byte(val))) % cfg.Modulus + hash := md5.Sum([]byte(val)) + // Use only the last 8 bytes of the hash to give the same result as earlier versions of this code. + mod := binary.BigEndian.Uint64(hash[8:]) % cfg.Modulus lb.Set(cfg.TargetLabel, fmt.Sprintf("%d", mod)) case LabelMap: - for _, l := range lset { + lb.Range(func(l labels.Label) { if cfg.Regex.MatchString(l.Name) { res := cfg.Regex.ReplaceAllString(l.Name, cfg.Replacement) lb.Set(res, l.Value) } - } + }) case LabelDrop: - for _, l := range lset { + lb.Range(func(l labels.Label) { if cfg.Regex.MatchString(l.Name) { lb.Del(l.Name) } - } + }) case LabelKeep: - for _, l := range lset { + lb.Range(func(l labels.Label) { if !cfg.Regex.MatchString(l.Name) { lb.Del(l.Name) } - } + }) default: panic(fmt.Errorf("relabel: unknown relabel action type %q", cfg.Action)) } - return lb.Labels(lset) -} - -// sum64 sums the md5 hash to an uint64. 
-func sum64(hash [md5.Size]byte) uint64 { - var s uint64 - - for i, b := range hash { - shift := uint64((md5.Size - i - 1) * 8) - - s |= uint64(b) << shift - } - return s + return true } diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index c437e5c1c1..b50ff4010a 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -28,6 +28,7 @@ func TestRelabel(t *testing.T) { input labels.Labels relabel []*Config output labels.Labels + drop bool }{ { input: labels.FromMap(map[string]string{ @@ -101,7 +102,7 @@ func TestRelabel(t *testing.T) { Action: Replace, }, }, - output: nil, + drop: true, }, { input: labels.FromMap(map[string]string{ @@ -115,7 +116,7 @@ func TestRelabel(t *testing.T) { Action: Drop, }, }, - output: nil, + drop: true, }, { input: labels.FromMap(map[string]string{ @@ -177,7 +178,7 @@ func TestRelabel(t *testing.T) { Action: Keep, }, }, - output: nil, + drop: true, }, { input: labels.FromMap(map[string]string{ @@ -396,6 +397,34 @@ func TestRelabel(t *testing.T) { "foo": "bar", }), }, + { // From https://github.com/prometheus/prometheus/issues/12283 + input: labels.FromMap(map[string]string{ + "__meta_kubernetes_pod_container_port_name": "foo", + "__meta_kubernetes_pod_annotation_XXX_metrics_port": "9091", + }), + relabel: []*Config{ + { + Regex: MustNewRegexp("^__meta_kubernetes_pod_container_port_name$"), + Action: LabelDrop, + }, + { + SourceLabels: model.LabelNames{"__meta_kubernetes_pod_annotation_XXX_metrics_port"}, + Regex: MustNewRegexp("(.+)"), + Action: Replace, + Replacement: "metrics", + TargetLabel: "__meta_kubernetes_pod_container_port_name", + }, + { + SourceLabels: model.LabelNames{"__meta_kubernetes_pod_container_port_name"}, + Regex: MustNewRegexp("^metrics$"), + Action: Keep, + }, + }, + output: labels.FromMap(map[string]string{ + "__meta_kubernetes_pod_annotation_XXX_metrics_port": "9091", + "__meta_kubernetes_pod_container_port_name": "metrics", + }), + }, { input: labels.FromMap(map[string]string{ "a": "foo", @@ -451,6 +480,74 @@ func TestRelabel(t *testing.T) { "foo_uppercase": "BAR123FOO", }), }, + { + input: labels.FromMap(map[string]string{ + "__tmp_port": "1234", + "__port1": "1234", + "__port2": "5678", + }), + relabel: []*Config{ + { + SourceLabels: model.LabelNames{"__tmp_port"}, + Action: KeepEqual, + TargetLabel: "__port1", + }, + }, + output: labels.FromMap(map[string]string{ + "__tmp_port": "1234", + "__port1": "1234", + "__port2": "5678", + }), + }, + { + input: labels.FromMap(map[string]string{ + "__tmp_port": "1234", + "__port1": "1234", + "__port2": "5678", + }), + relabel: []*Config{ + { + SourceLabels: model.LabelNames{"__tmp_port"}, + Action: DropEqual, + TargetLabel: "__port1", + }, + }, + drop: true, + }, + { + input: labels.FromMap(map[string]string{ + "__tmp_port": "1234", + "__port1": "1234", + "__port2": "5678", + }), + relabel: []*Config{ + { + SourceLabels: model.LabelNames{"__tmp_port"}, + Action: DropEqual, + TargetLabel: "__port2", + }, + }, + output: labels.FromMap(map[string]string{ + "__tmp_port": "1234", + "__port1": "1234", + "__port2": "5678", + }), + }, + { + input: labels.FromMap(map[string]string{ + "__tmp_port": "1234", + "__port1": "1234", + "__port2": "5678", + }), + relabel: []*Config{ + { + SourceLabels: model.LabelNames{"__tmp_port"}, + Action: KeepEqual, + TargetLabel: "__port2", + }, + }, + drop: true, + }, } for _, test := range tests { @@ -470,8 +567,11 @@ func TestRelabel(t *testing.T) { } } - res := Process(test.input, test.relabel...) 
- require.Equal(t, test.output, res) + res, keep := Process(test.input, test.relabel...) + require.Equal(t, !test.drop, keep) + if keep { + require.Equal(t, test.output, res) + } } } @@ -653,7 +753,7 @@ func BenchmarkRelabel(b *testing.B) { for _, tt := range tests { b.Run(tt.name, func(b *testing.B) { for i := 0; i < b.N; i++ { - _ = Process(tt.lbls, tt.cfgs...) + _, _ = Process(tt.lbls, tt.cfgs...) } }) } diff --git a/model/rulefmt/rulefmt.go b/model/rulefmt/rulefmt.go index f1d5f39257..30b3face0d 100644 --- a/model/rulefmt/rulefmt.go +++ b/model/rulefmt/rulefmt.go @@ -143,22 +143,24 @@ type RuleGroup struct { // Rule describes an alerting or recording rule. type Rule struct { - Record string `yaml:"record,omitempty"` - Alert string `yaml:"alert,omitempty"` - Expr string `yaml:"expr"` - For model.Duration `yaml:"for,omitempty"` - Labels map[string]string `yaml:"labels,omitempty"` - Annotations map[string]string `yaml:"annotations,omitempty"` + Record string `yaml:"record,omitempty"` + Alert string `yaml:"alert,omitempty"` + Expr string `yaml:"expr"` + For model.Duration `yaml:"for,omitempty"` + KeepFiringFor model.Duration `yaml:"keep_firing_for,omitempty"` + Labels map[string]string `yaml:"labels,omitempty"` + Annotations map[string]string `yaml:"annotations,omitempty"` } // RuleNode adds yaml.v3 layer to support line and column outputs for invalid rules. type RuleNode struct { - Record yaml.Node `yaml:"record,omitempty"` - Alert yaml.Node `yaml:"alert,omitempty"` - Expr yaml.Node `yaml:"expr"` - For model.Duration `yaml:"for,omitempty"` - Labels map[string]string `yaml:"labels,omitempty"` - Annotations map[string]string `yaml:"annotations,omitempty"` + Record yaml.Node `yaml:"record,omitempty"` + Alert yaml.Node `yaml:"alert,omitempty"` + Expr yaml.Node `yaml:"expr"` + For model.Duration `yaml:"for,omitempty"` + KeepFiringFor model.Duration `yaml:"keep_firing_for,omitempty"` + Labels map[string]string `yaml:"labels,omitempty"` + Annotations map[string]string `yaml:"annotations,omitempty"` } // Validate the rule and return a list of encountered errors. 
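The new `keep_firing_for` field is legal only on alerting rules; the `Validate` hunk just below rejects it on recording rules, as the new testdata files exercise. A minimal sketch of how that surfaces through the package API, assuming the exported `rulefmt.Parse` helper (the tests here go through `ParseFile` instead), with the printed message shown only in its essential part:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/rulefmt"
)

func main() {
	// keep_firing_for belongs to alerting rules; on a recording rule it
	// fails validation (see the Validate hunk below).
	bad := []byte(`
groups:
  - name: example
    rules:
      - record: job:errors:rate5m
        expr: rate(errors_total[5m])
        keep_firing_for: 10m
`)
	_, errs := rulefmt.Parse(bad)
	for _, err := range errs {
		// Contains: invalid field 'keep_firing_for' in recording rule
		fmt.Println(err)
	}
}
```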
@@ -208,6 +210,12 @@ func (r *RuleNode) Validate() (nodes []WrappedError) { node: &r.Record, }) } + if r.KeepFiringFor != 0 { + nodes = append(nodes, WrappedError{ + err: fmt.Errorf("invalid field 'keep_firing_for' in recording rule"), + node: &r.Record, + }) + } if !model.IsValidMetricName(model.LabelValue(r.Record.Value)) { nodes = append(nodes, WrappedError{ err: fmt.Errorf("invalid recording rule name: %s", r.Record.Value), diff --git a/model/rulefmt/rulefmt_test.go b/model/rulefmt/rulefmt_test.go index d79ee94e29..d6499538e2 100644 --- a/model/rulefmt/rulefmt_test.go +++ b/model/rulefmt/rulefmt_test.go @@ -73,12 +73,21 @@ func TestParseFileFailure(t *testing.T) { filename: "invalid_label_name.bad.yaml", errMsg: "invalid label name", }, + { + filename: "record_and_for.bad.yaml", + errMsg: "invalid field 'for' in recording rule", + }, + { + filename: "record_and_keep_firing_for.bad.yaml", + errMsg: "invalid field 'keep_firing_for' in recording rule", + }, } for _, c := range table { _, errs := ParseFile(filepath.Join("testdata", c.filename)) require.NotNil(t, errs, "Expected error parsing %s but got none", c.filename) - require.Error(t, errs[0], c.errMsg, "Expected error for %s.", c.filename) + require.Error(t, errs[0]) + require.Containsf(t, errs[0].Error(), c.errMsg, "Expected error for %s.", c.filename) } } diff --git a/model/rulefmt/testdata/record_and_for.bad.yaml b/model/rulefmt/testdata/record_and_for.bad.yaml new file mode 100644 index 0000000000..a15d428a08 --- /dev/null +++ b/model/rulefmt/testdata/record_and_for.bad.yaml @@ -0,0 +1,6 @@ +groups: + - name: yolo + rules: + - record: Hello + expr: 1 + for: 1m diff --git a/model/rulefmt/testdata/record_and_keep_firing_for.bad.yaml b/model/rulefmt/testdata/record_and_keep_firing_for.bad.yaml new file mode 100644 index 0000000000..eb8192f05c --- /dev/null +++ b/model/rulefmt/testdata/record_and_keep_firing_for.bad.yaml @@ -0,0 +1,6 @@ +groups: + - name: yolo + rules: + - record: Hello + expr: 1 + keep_firing_for: 1m diff --git a/model/textparse/interface.go b/model/textparse/interface.go index 9300e2a40c..efa581410f 100644 --- a/model/textparse/interface.go +++ b/model/textparse/interface.go @@ -17,16 +17,23 @@ import ( "mime" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" ) // Parser parses samples from a byte slice of samples in the official // Prometheus and OpenMetrics text exposition formats. type Parser interface { - // Series returns the bytes of the series, the timestamp if set, and the value - // of the current sample. + // Series returns the bytes of a series with a simple float64 as a + // value, the timestamp if set, and the value of the current sample. Series() ([]byte, *int64, float64) + // Histogram returns the bytes of a series with a sparse histogram as a + // value, the timestamp if set, and the histogram in the current sample. + // Depending on the parsed input, the function returns an (integer) Histogram + // or a FloatHistogram, with the respective other return value being nil. + Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) + // Help returns the metric name and help text in the current entry. // Must only be called after Next returned a help entry. // The returned byte slices become invalid after the next call to Next. 
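To make the widened `Parser` interface concrete, here is an illustrative consumer loop. `EntryHistogram` is introduced a few hunks below; a real caller (the scrape loop) additionally handles metadata entries, exemplars, and storage errors, so treat this as a sketch only:

```go
package example

import (
	"errors"
	"fmt"
	"io"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/textparse"
)

// consume drains a Parser, handling both plain float series and native
// histograms.
func consume(p textparse.Parser) error {
	for {
		entry, err := p.Next()
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
		var lset labels.Labels
		switch entry {
		case textparse.EntrySeries:
			p.Metric(&lset)
			_, ts, v := p.Series()
			fmt.Println(lset, ts, v)
		case textparse.EntryHistogram:
			p.Metric(&lset)
			// Exactly one of h (integer) and fh (float) is non-nil,
			// depending on the parsed input.
			_, ts, h, fh := p.Histogram()
			fmt.Println(lset, ts, h, fh)
		}
	}
}
```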
@@ -64,28 +71,36 @@ type Parser interface { // // This function always returns a valid parser, but might additionally // return an error if the content type cannot be parsed. -func New(b []byte, contentType string) (Parser, error) { +func New(b []byte, contentType string, parseClassicHistograms bool) (Parser, error) { if contentType == "" { return NewPromParser(b), nil } mediaType, _, err := mime.ParseMediaType(contentType) - if err == nil && mediaType == "application/openmetrics-text" { - return NewOpenMetricsParser(b), nil + if err != nil { + return NewPromParser(b), err + } + switch mediaType { + case "application/openmetrics-text": + return NewOpenMetricsParser(b), nil + case "application/vnd.google.protobuf": + return NewProtobufParser(b, parseClassicHistograms), nil + default: + return NewPromParser(b), nil } - return NewPromParser(b), err } // Entry represents the type of a parsed entry. type Entry int const ( - EntryInvalid Entry = -1 - EntryType Entry = 0 - EntryHelp Entry = 1 - EntrySeries Entry = 2 - EntryComment Entry = 3 - EntryUnit Entry = 4 + EntryInvalid Entry = -1 + EntryType Entry = 0 + EntryHelp Entry = 1 + EntrySeries Entry = 2 // A series with a simple float64 as value. + EntryComment Entry = 3 + EntryUnit Entry = 4 + EntryHistogram Entry = 5 // A series with a native histogram as a value. ) // MetricType represents metric type values. diff --git a/model/textparse/interface_test.go b/model/textparse/interface_test.go index d94467d4db..de140d6819 100644 --- a/model/textparse/interface_test.go +++ b/model/textparse/interface_test.go @@ -91,7 +91,7 @@ func TestNewParser(t *testing.T) { tt := tt // Copy to local variable before going parallel. t.Parallel() - p, err := New([]byte{}, tt.contentType) + p, err := New([]byte{}, tt.contentType, false) tt.validateParser(t, p) if tt.err == "" { require.NoError(t, err) diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go index be1ffcd093..c17d40020a 100644 --- a/model/textparse/openmetricsparse.go +++ b/model/textparse/openmetricsparse.go @@ -17,22 +17,19 @@ package textparse import ( - "bytes" "errors" "fmt" "io" "math" - "sort" "strings" "unicode/utf8" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" ) -var allowedSuffixes = [][]byte{[]byte("_total"), []byte("_bucket")} - type openMetricsLexer struct { b []byte i int @@ -46,13 +43,6 @@ func (l *openMetricsLexer) buf() []byte { return l.b[l.start:l.i] } -func (l *openMetricsLexer) cur() byte { - if l.i < len(l.b) { - return l.b[l.i] - } - return byte(' ') -} - // next advances the openMetricsLexer to the next character. func (l *openMetricsLexer) next() byte { l.i++ @@ -81,6 +71,7 @@ func (l *openMetricsLexer) Error(es string) { // This is based on the working draft https://docs.google.com/document/u/1/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit type OpenMetricsParser struct { l *openMetricsLexer + builder labels.ScratchBuilder series []byte text []byte mtype MetricType @@ -112,6 +103,12 @@ func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) { return p.series, nil, p.val } +// Histogram returns (nil, nil, nil, nil) for now because OpenMetrics does not +// support sparse histograms yet. 
+func (p *OpenMetricsParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { + return nil, nil, nil, nil +} + // Help returns the metric name and help text in the current entry. // Must only be called after Next returned a help entry. // The returned byte slices become invalid after the next call to Next. @@ -151,14 +148,11 @@ func (p *OpenMetricsParser) Comment() []byte { // Metric writes the labels of the current sample into the passed labels. // It returns the string from which the metric was parsed. func (p *OpenMetricsParser) Metric(l *labels.Labels) string { - // Allocate the full immutable string immediately, so we just - // have to create references on it below. + // Copy the buffer to a string: this is only necessary for the return value. s := string(p.series) - *l = append(*l, labels.Label{ - Name: labels.MetricName, - Value: s[:p.offsets[0]-p.start], - }) + p.builder.Reset() + p.builder.Add(labels.MetricName, s[:p.offsets[0]-p.start]) for i := 1; i < len(p.offsets); i += 4 { a := p.offsets[i] - p.start @@ -166,16 +160,16 @@ func (p *OpenMetricsParser) Metric(l *labels.Labels) string { c := p.offsets[i+2] - p.start d := p.offsets[i+3] - p.start + value := s[c:d] // Replacer causes allocations. Replace only when necessary. if strings.IndexByte(s[c:d], byte('\\')) >= 0 { - *l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])}) - continue + value = lvalReplacer.Replace(value) } - *l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]}) + p.builder.Add(s[a:b], value) } - // Sort labels. - sort.Sort(*l) + p.builder.Sort() + *l = p.builder.Labels() return s } @@ -197,17 +191,18 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { e.Ts = p.exemplarTs } + p.builder.Reset() for i := 0; i < len(p.eOffsets); i += 4 { a := p.eOffsets[i] - p.start b := p.eOffsets[i+1] - p.start c := p.eOffsets[i+2] - p.start d := p.eOffsets[i+3] - p.start - e.Labels = append(e.Labels, labels.Label{Name: s[a:b], Value: s[c:d]}) + p.builder.Add(s[a:b], s[c:d]) } - // Sort the labels. - sort.Sort(e.Labels) + p.builder.Sort() + e.Labels = p.builder.Labels() return true } @@ -218,6 +213,14 @@ func (p *OpenMetricsParser) nextToken() token { return tok } +func (p *OpenMetricsParser) parseError(exp string, got token) error { + e := p.l.i + 1 + if len(p.l.b) < e { + e = len(p.l.b) + } + return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e]) +} + // Next advances the parser to the next sample. It returns false if no // more samples were read or an error occurred. 
func (p *OpenMetricsParser) Next() (Entry, error) { @@ -243,7 +246,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { case tMName: p.offsets = append(p.offsets, p.l.start, p.l.i) default: - return EntryInvalid, parseError("expected metric name after "+t.String(), t2) + return EntryInvalid, p.parseError("expected metric name after "+t.String(), t2) } switch t2 := p.nextToken(); t2 { case tText: @@ -279,7 +282,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { } case tHelp: if !utf8.Valid(p.text) { - return EntryInvalid, errors.New("help text is not a valid utf8 string") + return EntryInvalid, fmt.Errorf("help text %q is not a valid utf8 string", p.text) } } switch t { @@ -292,7 +295,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { u := yoloString(p.text) if len(u) > 0 { if !strings.HasSuffix(m, u) || len(m) < len(u)+1 || p.l.b[p.offsets[1]-len(u)-1] != '_' { - return EntryInvalid, fmt.Errorf("unit not a suffix of metric %q", m) + return EntryInvalid, fmt.Errorf("unit %q not a suffix of metric %q", u, m) } } return EntryUnit, nil @@ -331,10 +334,10 @@ func (p *OpenMetricsParser) Next() (Entry, error) { var ts float64 // A float is enough to hold what we need for millisecond resolution. if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { - return EntryInvalid, err + return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) } if math.IsNaN(ts) || math.IsInf(ts, 0) { - return EntryInvalid, errors.New("invalid timestamp") + return EntryInvalid, fmt.Errorf("invalid timestamp %f", ts) } p.ts = int64(ts * 1000) switch t3 := p.nextToken(); t3 { @@ -344,26 +347,20 @@ func (p *OpenMetricsParser) Next() (Entry, error) { return EntryInvalid, err } default: - return EntryInvalid, parseError("expected next entry after timestamp", t3) + return EntryInvalid, p.parseError("expected next entry after timestamp", t3) } default: - return EntryInvalid, parseError("expected timestamp or # symbol", t2) + return EntryInvalid, p.parseError("expected timestamp or # symbol", t2) } return EntrySeries, nil default: - err = fmt.Errorf("%q %q is not a valid start token", t, string(p.l.cur())) + err = p.parseError("expected a valid start token", t) } return EntryInvalid, err } func (p *OpenMetricsParser) parseComment() error { - // Validate the name of the metric. It must have _total or _bucket as - // suffix for exemplars to be supported. - if err := p.validateNameForExemplar(p.series[:p.offsets[0]-p.start]); err != nil { - return err - } - var err error // Parse the labels. p.eOffsets, err = p.parseLVals(p.eOffsets) @@ -390,19 +387,19 @@ func (p *OpenMetricsParser) parseComment() error { var ts float64 // A float is enough to hold what we need for millisecond resolution. 
if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { - return err + return fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) } if math.IsNaN(ts) || math.IsInf(ts, 0) { - return errors.New("invalid exemplar timestamp") + return fmt.Errorf("invalid exemplar timestamp %f", ts) } p.exemplarTs = int64(ts * 1000) switch t3 := p.nextToken(); t3 { case tLinebreak: default: - return parseError("expected next entry after exemplar timestamp", t3) + return p.parseError("expected next entry after exemplar timestamp", t3) } default: - return parseError("expected timestamp or comment", t2) + return p.parseError("expected timestamp or comment", t2) } return nil } @@ -416,21 +413,21 @@ func (p *OpenMetricsParser) parseLVals(offsets []int) ([]int, error) { return offsets, nil case tComma: if first { - return nil, parseError("expected label name or left brace", t) + return nil, p.parseError("expected label name or left brace", t) } t = p.nextToken() if t != tLName { - return nil, parseError("expected label name", t) + return nil, p.parseError("expected label name", t) } case tLName: if !first { - return nil, parseError("expected comma", t) + return nil, p.parseError("expected comma", t) } default: if first { - return nil, parseError("expected label name or left brace", t) + return nil, p.parseError("expected label name or left brace", t) } - return nil, parseError("expected comma or left brace", t) + return nil, p.parseError("expected comma or left brace", t) } first = false @@ -439,13 +436,13 @@ func (p *OpenMetricsParser) parseLVals(offsets []int) ([]int, error) { offsets = append(offsets, p.l.start, p.l.i) if t := p.nextToken(); t != tEqual { - return nil, parseError("expected equal", t) + return nil, p.parseError("expected equal", t) } if t := p.nextToken(); t != tLValue { - return nil, parseError("expected label value", t) + return nil, p.parseError("expected label value", t) } if !utf8.Valid(p.l.buf()) { - return nil, errors.New("invalid UTF-8 label value") + return nil, fmt.Errorf("invalid UTF-8 label value: %q", p.l.buf()) } // The openMetricsLexer ensures the value string is quoted. Strip first @@ -456,11 +453,11 @@ func (p *OpenMetricsParser) parseLVals(offsets []int) ([]int, error) { func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error) { if t != tValue { - return 0, parseError(fmt.Sprintf("expected value after %v", after), t) + return 0, p.parseError(fmt.Sprintf("expected value after %v", after), t) } val, err := parseFloat(yoloString(p.l.buf()[1:])) if err != nil { - return 0, err + return 0, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) } // Ensure canonical NaN value. 
if math.IsNaN(p.exemplarVal) { @@ -468,12 +465,3 @@ func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error } return val, nil } - -func (p *OpenMetricsParser) validateNameForExemplar(name []byte) error { - for _, suffix := range allowedSuffixes { - if bytes.HasSuffix(name, suffix) { - return nil - } - } - return fmt.Errorf("metric name %v does not support exemplars", string(name)) -} diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index fdb70c566b..d65e4977ef 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -45,9 +45,14 @@ hh_bucket{le="+Inf"} 1 # TYPE gh gaugehistogram gh_bucket{le="+Inf"} 1 # TYPE hhh histogram -hhh_bucket{le="+Inf"} 1 # {aa="bb"} 4 +hhh_bucket{le="+Inf"} 1 # {id="histogram-bucket-test"} 4 +hhh_count 1 # {id="histogram-count-test"} 4 # TYPE ggh gaugehistogram -ggh_bucket{le="+Inf"} 1 # {cc="dd",xx="yy"} 4 123.123 +ggh_bucket{le="+Inf"} 1 # {id="gaugehistogram-bucket-test",xx="yy"} 4 123.123 +ggh_count 1 # {id="gaugehistogram-count-test",xx="yy"} 4 123.123 +# TYPE smr_seconds summary +smr_seconds_count 2.0 # {id="summary-count-test"} 1 123.321 +smr_seconds_sum 42.0 # {id="summary-sum-test"} 1 123.321 # TYPE ii info ii{foo="bar"} 1 # TYPE ss stateset @@ -59,7 +64,7 @@ _metric_starting_with_underscore 1 testmetric{_label_starting_with_underscore="foo"} 1 testmetric{label="\"bar\""} 1 # TYPE foo counter -foo_total 17.0 1520879607.789 # {xx="yy"} 5` +foo_total 17.0 1520879607.789 # {id="counter-test"} 5` input += "\n# HELP metric foo\x00bar" input += "\nnull_byte_metric{a=\"abc\x00\"} 1" @@ -152,7 +157,12 @@ foo_total 17.0 1520879607.789 # {xx="yy"} 5` m: `hhh_bucket{le="+Inf"}`, v: 1, lset: labels.FromStrings("__name__", "hhh_bucket", "le", "+Inf"), - e: &exemplar.Exemplar{Labels: labels.FromStrings("aa", "bb"), Value: 4}, + e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "histogram-bucket-test"), Value: 4}, + }, { + m: `hhh_count`, + v: 1, + lset: labels.FromStrings("__name__", "hhh_count"), + e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "histogram-count-test"), Value: 4}, }, { m: "ggh", typ: MetricTypeGaugeHistogram, @@ -160,7 +170,25 @@ foo_total 17.0 1520879607.789 # {xx="yy"} 5` m: `ggh_bucket{le="+Inf"}`, v: 1, lset: labels.FromStrings("__name__", "ggh_bucket", "le", "+Inf"), - e: &exemplar.Exemplar{Labels: labels.FromStrings("cc", "dd", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}, + e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "gaugehistogram-bucket-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}, + }, { + m: `ggh_count`, + v: 1, + lset: labels.FromStrings("__name__", "ggh_count"), + e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "gaugehistogram-count-test", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123}, + }, { + m: "smr_seconds", + typ: MetricTypeSummary, + }, { + m: `smr_seconds_count`, + v: 2, + lset: labels.FromStrings("__name__", "smr_seconds_count"), + e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "summary-count-test"), Value: 1, HasTs: true, Ts: 123321}, + }, { + m: `smr_seconds_sum`, + v: 42, + lset: labels.FromStrings("__name__", "smr_seconds_sum"), + e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "summary-sum-test"), Value: 1, HasTs: true, Ts: 123321}, }, { m: "ii", typ: MetricTypeInfo, @@ -206,7 +234,7 @@ foo_total 17.0 1520879607.789 # {xx="yy"} 5` v: 17, lset: labels.FromStrings("__name__", "foo_total"), t: int64p(1520879607789), - e: &exemplar.Exemplar{Labels: 
labels.FromStrings("xx", "yy"), Value: 5}, + e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5}, }, { m: "metric", help: "foo\x00bar", @@ -237,9 +265,7 @@ foo_total 17.0 1520879607.789 # {xx="yy"} 5` p.Metric(&res) found := p.Exemplar(&e) require.Equal(t, exp[i].m, string(m)) - if e.HasTs { - require.Equal(t, exp[i].t, ts) - } + require.Equal(t, exp[i].t, ts) require.Equal(t, exp[i].v, v) require.Equal(t, exp[i].lset, res) if exp[i].e == nil { @@ -248,7 +274,6 @@ foo_total 17.0 1520879607.789 # {xx="yy"} 5` require.Equal(t, true, found) require.Equal(t, *exp[i].e, e) } - res = res[:0] case EntryType: m, typ := p.Type() @@ -296,11 +321,11 @@ func TestOpenMetricsParseErrors(t *testing.T) { }, { input: "\n", - err: "\"INVALID\" \"\\n\" is not a valid start token", + err: "expected a valid start token, got \"\\n\" (\"INVALID\") while parsing: \"\\n\"", }, { input: "metric", - err: "expected value after metric, got \"EOF\"", + err: "expected value after metric, got \"metric\" (\"EOF\") while parsing: \"metric\"", }, { input: "metric 1", @@ -316,19 +341,19 @@ func TestOpenMetricsParseErrors(t *testing.T) { }, { input: "a\n#EOF\n", - err: "expected value after metric, got \"INVALID\"", + err: "expected value after metric, got \"\\n\" (\"INVALID\") while parsing: \"a\\n\"", }, { input: "\n\n#EOF\n", - err: "\"INVALID\" \"\\n\" is not a valid start token", + err: "expected a valid start token, got \"\\n\" (\"INVALID\") while parsing: \"\\n\"", }, { input: " a 1\n#EOF\n", - err: "\"INVALID\" \" \" is not a valid start token", + err: "expected a valid start token, got \" \" (\"INVALID\") while parsing: \" \"", }, { input: "9\n#EOF\n", - err: "\"INVALID\" \"9\" is not a valid start token", + err: "expected a valid start token, got \"9\" (\"INVALID\") while parsing: \"9\"", }, { input: "# TYPE u untyped\n#EOF\n", @@ -340,11 +365,11 @@ func TestOpenMetricsParseErrors(t *testing.T) { }, { input: "# TYPE c counter\n#EOF\n", - err: "\"INVALID\" \" \" is not a valid start token", + err: "expected a valid start token, got \"# \" (\"INVALID\") while parsing: \"# \"", }, { input: "# TYPE \n#EOF\n", - err: "expected metric name after TYPE, got \"INVALID\"", + err: "expected metric name after TYPE, got \"\\n\" (\"INVALID\") while parsing: \"# TYPE \\n\"", }, { input: "# TYPE m\n#EOF\n", @@ -352,19 +377,19 @@ func TestOpenMetricsParseErrors(t *testing.T) { }, { input: "# UNIT metric suffix\n#EOF\n", - err: "unit not a suffix of metric \"metric\"", + err: "unit \"suffix\" not a suffix of metric \"metric\"", }, { input: "# UNIT metricsuffix suffix\n#EOF\n", - err: "unit not a suffix of metric \"metricsuffix\"", + err: "unit \"suffix\" not a suffix of metric \"metricsuffix\"", }, { input: "# UNIT m suffix\n#EOF\n", - err: "unit not a suffix of metric \"m\"", + err: "unit \"suffix\" not a suffix of metric \"m\"", }, { input: "# UNIT \n#EOF\n", - err: "expected metric name after UNIT, got \"INVALID\"", + err: "expected metric name after UNIT, got \"\\n\" (\"INVALID\") while parsing: \"# UNIT \\n\"", }, { input: "# UNIT m\n#EOF\n", @@ -372,7 +397,7 @@ func TestOpenMetricsParseErrors(t *testing.T) { }, { input: "# HELP \n#EOF\n", - err: "expected metric name after HELP, got \"INVALID\"", + err: "expected metric name after HELP, got \"\\n\" (\"INVALID\") while parsing: \"# HELP \\n\"", }, { input: "# HELP m\n#EOF\n", @@ -380,27 +405,27 @@ func TestOpenMetricsParseErrors(t *testing.T) { }, { input: "a\t1\n#EOF\n", - err: "expected value after metric, got \"INVALID\"", + err: "expected value 
after metric, got \"\\t\" (\"INVALID\") while parsing: \"a\\t\"", }, { input: "a 1\t2\n#EOF\n", - err: "strconv.ParseFloat: parsing \"1\\t2\": invalid syntax", + err: "strconv.ParseFloat: parsing \"1\\t2\": invalid syntax while parsing: \"a 1\\t2\"", }, { input: "a 1 2 \n#EOF\n", - err: "expected next entry after timestamp, got \"INVALID\"", + err: "expected next entry after timestamp, got \" \\n\" (\"INVALID\") while parsing: \"a 1 2 \\n\"", }, { input: "a 1 2 #\n#EOF\n", - err: "expected next entry after timestamp, got \"TIMESTAMP\"", + err: "expected next entry after timestamp, got \" #\\n\" (\"TIMESTAMP\") while parsing: \"a 1 2 #\\n\"", }, { input: "a 1 1z\n#EOF\n", - err: "strconv.ParseFloat: parsing \"1z\": invalid syntax", + err: "strconv.ParseFloat: parsing \"1z\": invalid syntax while parsing: \"a 1 1z\"", }, { input: " # EOF\n", - err: "\"INVALID\" \" \" is not a valid start token", + err: "expected a valid start token, got \" \" (\"INVALID\") while parsing: \" \"", }, { input: "# EOF\na 1", @@ -416,7 +441,7 @@ func TestOpenMetricsParseErrors(t *testing.T) { }, { input: "#\tTYPE c counter\n", - err: "\"INVALID\" \"\\t\" is not a valid start token", + err: "expected a valid start token, got \"#\\t\" (\"INVALID\") while parsing: \"#\\t\"", }, { input: "# TYPE c counter\n", @@ -424,135 +449,131 @@ func TestOpenMetricsParseErrors(t *testing.T) { }, { input: "a 1 1 1\n# EOF\n", - err: "expected next entry after timestamp, got \"TIMESTAMP\"", + err: "expected next entry after timestamp, got \" 1\\n\" (\"TIMESTAMP\") while parsing: \"a 1 1 1\\n\"", }, { input: "a{b='c'} 1\n# EOF\n", - err: "expected label value, got \"INVALID\"", + err: "expected label value, got \"'\" (\"INVALID\") while parsing: \"a{b='\"", }, { input: "a{b=\"c\",} 1\n# EOF\n", - err: "expected label name, got \"BCLOSE\"", + err: "expected label name, got \"} \" (\"BCLOSE\") while parsing: \"a{b=\\\"c\\\",} \"", }, { input: "a{,b=\"c\"} 1\n# EOF\n", - err: "expected label name or left brace, got \"COMMA\"", + err: "expected label name or left brace, got \",b\" (\"COMMA\") while parsing: \"a{,b\"", }, { input: "a{b=\"c\"d=\"e\"} 1\n# EOF\n", - err: "expected comma, got \"LNAME\"", + err: "expected comma, got \"d=\" (\"LNAME\") while parsing: \"a{b=\\\"c\\\"d=\"", }, { input: "a{b=\"c\",,d=\"e\"} 1\n# EOF\n", - err: "expected label name, got \"COMMA\"", + err: "expected label name, got \",d\" (\"COMMA\") while parsing: \"a{b=\\\"c\\\",,d\"", }, { input: "a{b=\n# EOF\n", - err: "expected label value, got \"INVALID\"", + err: "expected label value, got \"\\n\" (\"INVALID\") while parsing: \"a{b=\\n\"", }, { input: "a{\xff=\"foo\"} 1\n# EOF\n", - err: "expected label name or left brace, got \"INVALID\"", + err: "expected label name or left brace, got \"\\xff\" (\"INVALID\") while parsing: \"a{\\xff\"", }, { input: "a{b=\"\xff\"} 1\n# EOF\n", - err: "invalid UTF-8 label value", + err: "invalid UTF-8 label value: \"\\\"\\xff\\\"\"", }, { input: "a true\n", - err: "strconv.ParseFloat: parsing \"true\": invalid syntax", + err: "strconv.ParseFloat: parsing \"true\": invalid syntax while parsing: \"a true\"", }, { input: "something_weird{problem=\"\n# EOF\n", - err: "expected label value, got \"INVALID\"", + err: "expected label value, got \"\\\"\\n\" (\"INVALID\") while parsing: \"something_weird{problem=\\\"\\n\"", }, { input: "empty_label_name{=\"\"} 0\n# EOF\n", - err: "expected label name or left brace, got \"EQUAL\"", + err: "expected label name or left brace, got \"=\\\"\" (\"EQUAL\") while parsing: 
\"empty_label_name{=\\\"\"", }, { input: "foo 1_2\n\n# EOF\n", - err: "unsupported character in float", + err: "unsupported character in float while parsing: \"foo 1_2\"", }, { input: "foo 0x1p-3\n\n# EOF\n", - err: "unsupported character in float", + err: "unsupported character in float while parsing: \"foo 0x1p-3\"", }, { input: "foo 0x1P-3\n\n# EOF\n", - err: "unsupported character in float", + err: "unsupported character in float while parsing: \"foo 0x1P-3\"", }, { input: "foo 0 1_2\n\n# EOF\n", - err: "unsupported character in float", + err: "unsupported character in float while parsing: \"foo 0 1_2\"", }, { input: "custom_metric_total 1 # {aa=bb}\n# EOF\n", - err: "expected label value, got \"INVALID\"", + err: "expected label value, got \"b\" (\"INVALID\") while parsing: \"custom_metric_total 1 # {aa=b\"", }, { input: "custom_metric_total 1 # {aa=\"bb\"}\n# EOF\n", - err: "expected value after exemplar labels, got \"INVALID\"", + err: "expected value after exemplar labels, got \"\\n\" (\"INVALID\") while parsing: \"custom_metric_total 1 # {aa=\\\"bb\\\"}\\n\"", }, { input: `custom_metric_total 1 # {aa="bb"}`, - err: "expected value after exemplar labels, got \"EOF\"", - }, - { - input: `custom_metric 1 # {aa="bb"}`, - err: "metric name custom_metric does not support exemplars", + err: "expected value after exemplar labels, got \"}\" (\"EOF\") while parsing: \"custom_metric_total 1 # {aa=\\\"bb\\\"}\"", }, { input: `custom_metric_total 1 # {aa="bb",,cc="dd"} 1`, - err: "expected label name, got \"COMMA\"", + err: "expected label name, got \",c\" (\"COMMA\") while parsing: \"custom_metric_total 1 # {aa=\\\"bb\\\",,c\"", }, { input: `custom_metric_total 1 # {aa="bb"} 1_2`, - err: "unsupported character in float", + err: "unsupported character in float while parsing: \"custom_metric_total 1 # {aa=\\\"bb\\\"} 1_2\"", }, { input: `custom_metric_total 1 # {aa="bb"} 0x1p-3`, - err: "unsupported character in float", + err: "unsupported character in float while parsing: \"custom_metric_total 1 # {aa=\\\"bb\\\"} 0x1p-3\"", }, { input: `custom_metric_total 1 # {aa="bb"} true`, - err: "strconv.ParseFloat: parsing \"true\": invalid syntax", + err: "strconv.ParseFloat: parsing \"true\": invalid syntax while parsing: \"custom_metric_total 1 # {aa=\\\"bb\\\"} true\"", }, { input: `custom_metric_total 1 # {aa="bb",cc=}`, - err: "expected label value, got \"INVALID\"", + err: "expected label value, got \"}\" (\"INVALID\") while parsing: \"custom_metric_total 1 # {aa=\\\"bb\\\",cc=}\"", }, { input: `custom_metric_total 1 # {aa=\"\xff\"} 9.0`, - err: "expected label value, got \"INVALID\"", + err: "expected label value, got \"\\\\\" (\"INVALID\") while parsing: \"custom_metric_total 1 # {aa=\\\\\"", }, { input: `{b="c",} 1`, - err: `"INVALID" "{" is not a valid start token`, + err: "expected a valid start token, got \"{\" (\"INVALID\") while parsing: \"{\"", }, { input: `a 1 NaN`, - err: `invalid timestamp`, + err: `invalid timestamp NaN`, }, { input: `a 1 -Inf`, - err: `invalid timestamp`, + err: `invalid timestamp -Inf`, }, { input: `a 1 Inf`, - err: `invalid timestamp`, + err: `invalid timestamp +Inf`, }, { input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 NaN", - err: `invalid exemplar timestamp`, + err: `invalid exemplar timestamp NaN`, }, { input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 -Inf", - err: `invalid exemplar timestamp`, + err: `invalid exemplar timestamp -Inf`, }, { input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 
Inf", - err: `invalid exemplar timestamp`, + err: `invalid exemplar timestamp +Inf`, }, } @@ -589,35 +610,35 @@ func TestOMNullByteHandling(t *testing.T) { }, { input: "a{b=\x00\"ssss\"} 1\n# EOF\n", - err: "expected label value, got \"INVALID\"", + err: "expected label value, got \"\\x00\" (\"INVALID\") while parsing: \"a{b=\\x00\"", }, { input: "a{b=\"\x00", - err: "expected label value, got \"INVALID\"", + err: "expected label value, got \"\\\"\\x00\" (\"INVALID\") while parsing: \"a{b=\\\"\\x00\"", }, { input: "a{b\x00=\"hiih\"} 1", - err: "expected equal, got \"INVALID\"", + err: "expected equal, got \"\\x00\" (\"INVALID\") while parsing: \"a{b\\x00\"", }, { input: "a\x00{b=\"ddd\"} 1", - err: "expected value after metric, got \"INVALID\"", + err: "expected value after metric, got \"\\x00\" (\"INVALID\") while parsing: \"a\\x00\"", }, { input: "#", - err: "\"INVALID\" \" \" is not a valid start token", + err: "expected a valid start token, got \"#\" (\"INVALID\") while parsing: \"#\"", }, { input: "# H", - err: "\"INVALID\" \" \" is not a valid start token", + err: "expected a valid start token, got \"# H\" (\"INVALID\") while parsing: \"# H\"", }, { input: "custom_metric_total 1 # {b=\x00\"ssss\"} 1\n", - err: "expected label value, got \"INVALID\"", + err: "expected label value, got \"\\x00\" (\"INVALID\") while parsing: \"custom_metric_total 1 # {b=\\x00\"", }, { input: "custom_metric_total 1 # {b=\"\x00ss\"} 1\n", - err: "expected label value, got \"INVALID\"", + err: "expected label value, got \"\\\"\\x00\" (\"INVALID\") while parsing: \"custom_metric_total 1 # {b=\\\"\\x00\"", }, } diff --git a/model/textparse/promparse.go b/model/textparse/promparse.go index 758ea5a66a..94338a6660 100644 --- a/model/textparse/promparse.go +++ b/model/textparse/promparse.go @@ -21,13 +21,13 @@ import ( "fmt" "io" "math" - "sort" "strconv" "strings" "unicode/utf8" "unsafe" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" ) @@ -143,6 +143,7 @@ func (l *promlexer) Error(es string) { // Prometheus text exposition format. type PromParser struct { l *promlexer + builder labels.ScratchBuilder series []byte text []byte mtype MetricType @@ -167,6 +168,12 @@ func (p *PromParser) Series() ([]byte, *int64, float64) { return p.series, nil, p.val } +// Histogram returns (nil, nil, nil, nil) for now because the Prometheus text +// format does not support sparse histograms yet. +func (p *PromParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { + return nil, nil, nil, nil +} + // Help returns the metric name and help text in the current entry. // Must only be called after Next returned a help entry. // The returned byte slices become invalid after the next call to Next. @@ -205,14 +212,11 @@ func (p *PromParser) Comment() []byte { // Metric writes the labels of the current sample into the passed labels. // It returns the string from which the metric was parsed. func (p *PromParser) Metric(l *labels.Labels) string { - // Allocate the full immutable string immediately, so we just - // have to create references on it below. + // Copy the buffer to a string: this is only necessary for the return value. 
s := string(p.series) - *l = append(*l, labels.Label{ - Name: labels.MetricName, - Value: s[:p.offsets[0]-p.start], - }) + p.builder.Reset() + p.builder.Add(labels.MetricName, s[:p.offsets[0]-p.start]) for i := 1; i < len(p.offsets); i += 4 { a := p.offsets[i] - p.start @@ -220,23 +224,24 @@ func (p *PromParser) Metric(l *labels.Labels) string { c := p.offsets[i+2] - p.start d := p.offsets[i+3] - p.start + value := s[c:d] // Replacer causes allocations. Replace only when necessary. if strings.IndexByte(s[c:d], byte('\\')) >= 0 { - *l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])}) - continue + value = lvalReplacer.Replace(value) } - *l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]}) + p.builder.Add(s[a:b], value) } - // Sort labels to maintain the sorted labels invariant. - sort.Sort(*l) + p.builder.Sort() + *l = p.builder.Labels() return s } -// Exemplar writes the exemplar of the current sample into the passed -// exemplar. It returns if an exemplar exists. -func (p *PromParser) Exemplar(e *exemplar.Exemplar) bool { +// Exemplar implements the Parser interface. However, since the classic +// Prometheus text format does not support exemplars, this implementation simply +// returns false and does nothing else. +func (p *PromParser) Exemplar(*exemplar.Exemplar) bool { return false } @@ -250,8 +255,12 @@ func (p *PromParser) nextToken() token { } } -func parseError(exp string, got token) error { - return fmt.Errorf("%s, got %q", exp, got) +func (p *PromParser) parseError(exp string, got token) error { + e := p.l.i + 1 + if len(p.l.b) < e { + e = len(p.l.b) + } + return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e]) } // Next advances the parser to the next sample. It returns false if no @@ -274,7 +283,7 @@ func (p *PromParser) Next() (Entry, error) { case tMName: p.offsets = append(p.offsets, p.l.start, p.l.i) default: - return EntryInvalid, parseError("expected metric name after "+t.String(), t2) + return EntryInvalid, p.parseError("expected metric name after "+t.String(), t2) } switch t2 := p.nextToken(); t2 { case tText: @@ -304,11 +313,11 @@ func (p *PromParser) Next() (Entry, error) { } case tHelp: if !utf8.Valid(p.text) { - return EntryInvalid, fmt.Errorf("help text is not a valid utf8 string") + return EntryInvalid, fmt.Errorf("help text %q is not a valid utf8 string", p.text) } } if t := p.nextToken(); t != tLinebreak { - return EntryInvalid, parseError("linebreak expected after metadata", t) + return EntryInvalid, p.parseError("linebreak expected after metadata", t) } switch t { case tHelp: @@ -319,7 +328,7 @@ func (p *PromParser) Next() (Entry, error) { case tComment: p.text = p.l.buf() if t := p.nextToken(); t != tLinebreak { - return EntryInvalid, parseError("linebreak expected after comment", t) + return EntryInvalid, p.parseError("linebreak expected after comment", t) } return EntryComment, nil @@ -336,34 +345,34 @@ func (p *PromParser) Next() (Entry, error) { t2 = p.nextToken() } if t2 != tValue { - return EntryInvalid, parseError("expected value after metric", t) + return EntryInvalid, p.parseError("expected value after metric", t2) } if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil { - return EntryInvalid, err + return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) } // Ensure canonical NaN value. 
if math.IsNaN(p.val) { p.val = math.Float64frombits(value.NormalNaN) } p.hasTS = false - switch p.nextToken() { + switch t := p.nextToken(); t { case tLinebreak: break case tTimestamp: p.hasTS = true if p.ts, err = strconv.ParseInt(yoloString(p.l.buf()), 10, 64); err != nil { - return EntryInvalid, err + return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) } if t2 := p.nextToken(); t2 != tLinebreak { - return EntryInvalid, parseError("expected next entry after timestamp", t) + return EntryInvalid, p.parseError("expected next entry after timestamp", t2) } default: - return EntryInvalid, parseError("expected timestamp or new record", t) + return EntryInvalid, p.parseError("expected timestamp or new record", t) } return EntrySeries, nil default: - err = fmt.Errorf("%q is not a valid start token", t) + err = p.parseError("expected a valid start token", t) } return EntryInvalid, err } @@ -376,18 +385,18 @@ func (p *PromParser) parseLVals() error { return nil case tLName: default: - return parseError("expected label name", t) + return p.parseError("expected label name", t) } p.offsets = append(p.offsets, p.l.start, p.l.i) if t := p.nextToken(); t != tEqual { - return parseError("expected equal", t) + return p.parseError("expected equal", t) } if t := p.nextToken(); t != tLValue { - return parseError("expected label value", t) + return p.parseError("expected label value", t) } if !utf8.Valid(p.l.buf()) { - return fmt.Errorf("invalid UTF-8 label value") + return fmt.Errorf("invalid UTF-8 label value: %q", p.l.buf()) } // The promlexer ensures the value string is quoted. Strip first diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go index 6a1216da16..280f39b4f1 100644 --- a/model/textparse/promparse_test.go +++ b/model/textparse/promparse_test.go @@ -192,7 +192,6 @@ testmetric{label="\"bar\""} 1` require.Equal(t, exp[i].t, ts) require.Equal(t, exp[i].v, v) require.Equal(t, exp[i].lset, res) - res = res[:0] case EntryType: m, typ := p.Type() @@ -220,63 +219,63 @@ func TestPromParseErrors(t *testing.T) { }{ { input: "a", - err: "expected value after metric, got \"MNAME\"", + err: "expected value after metric, got \"\\n\" (\"INVALID\") while parsing: \"a\\n\"", }, { input: "a{b='c'} 1\n", - err: "expected label value, got \"INVALID\"", + err: "expected label value, got \"'\" (\"INVALID\") while parsing: \"a{b='\"", }, { input: "a{b=\n", - err: "expected label value, got \"INVALID\"", + err: "expected label value, got \"\\n\" (\"INVALID\") while parsing: \"a{b=\\n\"", }, { input: "a{\xff=\"foo\"} 1\n", - err: "expected label name, got \"INVALID\"", + err: "expected label name, got \"\\xff\" (\"INVALID\") while parsing: \"a{\\xff\"", }, { input: "a{b=\"\xff\"} 1\n", - err: "invalid UTF-8 label value", + err: "invalid UTF-8 label value: \"\\\"\\xff\\\"\"", }, { input: "a true\n", - err: "strconv.ParseFloat: parsing \"true\": invalid syntax", + err: "strconv.ParseFloat: parsing \"true\": invalid syntax while parsing: \"a true\"", }, { input: "something_weird{problem=\"", - err: "expected label value, got \"INVALID\"", + err: "expected label value, got \"\\\"\\n\" (\"INVALID\") while parsing: \"something_weird{problem=\\\"\\n\"", }, { input: "empty_label_name{=\"\"} 0", - err: "expected label name, got \"EQUAL\"", + err: "expected label name, got \"=\\\"\" (\"EQUAL\") while parsing: \"empty_label_name{=\\\"\"", }, { input: "foo 1_2\n", - err: "unsupported character in float", + err: "unsupported character in float while parsing: \"foo 1_2\"", }, 
{ input: "foo 0x1p-3\n", - err: "unsupported character in float", + err: "unsupported character in float while parsing: \"foo 0x1p-3\"", }, { input: "foo 0x1P-3\n", - err: "unsupported character in float", + err: "unsupported character in float while parsing: \"foo 0x1P-3\"", }, { input: "foo 0 1_2\n", - err: "expected next entry after timestamp, got \"MNAME\"", + err: "expected next entry after timestamp, got \"_\" (\"INVALID\") while parsing: \"foo 0 1_\"", }, { input: `{a="ok"} 1`, - err: `"INVALID" is not a valid start token`, + err: "expected a valid start token, got \"{\" (\"INVALID\") while parsing: \"{\"", }, { input: "# TYPE #\n#EOF\n", - err: "expected metric name after TYPE, got \"INVALID\"", + err: "expected metric name after TYPE, got \"#\" (\"INVALID\") while parsing: \"# TYPE #\"", }, { input: "# HELP #\n#EOF\n", - err: "expected metric name after HELP, got \"INVALID\"", + err: "expected metric name after HELP, got \"#\" (\"INVALID\") while parsing: \"# HELP #\"", }, } @@ -314,19 +313,23 @@ func TestPromNullByteHandling(t *testing.T) { }, { input: "a{b=\x00\"ssss\"} 1\n", - err: "expected label value, got \"INVALID\"", + err: "expected label value, got \"\\x00\" (\"INVALID\") while parsing: \"a{b=\\x00\"", }, { input: "a{b=\"\x00", - err: "expected label value, got \"INVALID\"", + err: "expected label value, got \"\\\"\\x00\\n\" (\"INVALID\") while parsing: \"a{b=\\\"\\x00\\n\"", }, { input: "a{b\x00=\"hiih\"} 1", - err: "expected equal, got \"INVALID\"", + err: "expected equal, got \"\\x00\" (\"INVALID\") while parsing: \"a{b\\x00\"", }, { input: "a\x00{b=\"ddd\"} 1", - err: "expected value after metric, got \"MNAME\"", + err: "expected value after metric, got \"\\x00\" (\"INVALID\") while parsing: \"a\\x00\"", + }, + { + input: "a 0 1\x00", + err: "expected next entry after timestamp, got \"\\x00\" (\"INVALID\") while parsing: \"a 0 1\\x00\"", }, } @@ -414,7 +417,7 @@ func BenchmarkParse(b *testing.B) { case EntrySeries: m, _, _ := p.Series() - res := make(labels.Labels, 0, 5) + var res labels.Labels p.Metric(&res) total += len(m) @@ -426,7 +429,7 @@ func BenchmarkParse(b *testing.B) { }) b.Run(parserName+"/decode-metric-reuse/"+fn, func(b *testing.B) { total := 0 - res := make(labels.Labels, 0, 5) + var res labels.Labels b.SetBytes(int64(len(buf) / promtestdataSampleCount)) b.ReportAllocs() @@ -451,7 +454,6 @@ func BenchmarkParse(b *testing.B) { total += len(m) i++ - res = res[:0] } } } @@ -510,7 +512,7 @@ func BenchmarkGzip(b *testing.B) { k := b.N / promtestdataSampleCount b.ReportAllocs() - b.SetBytes(int64(n) / promtestdataSampleCount) + b.SetBytes(n / promtestdataSampleCount) b.ResetTimer() total := 0 diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go new file mode 100644 index 0000000000..b831251ad0 --- /dev/null +++ b/model/textparse/protobufparse.go @@ -0,0 +1,563 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package textparse
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"math"
+	"strings"
+	"unicode/utf8"
+
+	"github.com/gogo/protobuf/proto"
+	"github.com/pkg/errors"
+	"github.com/prometheus/common/model"
+
+	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/labels"
+
+	dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
+)
+
+// ProtobufParser is a very inefficient way of unmarshaling the old Prometheus
+// protobuf format and then presenting it as if it were parsed by a
+// Prometheus-2-style text parser. This is only done so that we can easily plug
+// in the protobuf format into Prometheus 2. For future use (with the final
+// format that will be used for native histograms), we have to revisit the
+// parsing. A lot of the efficiency tricks of the Prometheus-2-style parsing
+// could be used in a similar fashion (byte-slice pointers into the raw
+// payload), which requires some hand-coded protobuf handling. But the current
+// parsers all expect the full series name (metric name plus label pairs) as one
+// string, which is not how things are represented in the protobuf format. If
+// the re-arrangement work is actually causing problems (which has to be seen),
+// that expectation needs to be changed.
+type ProtobufParser struct {
+	in        []byte // The input to parse.
+	inPos     int    // Position within the input.
+	metricPos int    // Position within Metric slice.
+	// fieldPos is the position within a Summary or (legacy) Histogram. -2
+	// is the count. -1 is the sum. Otherwise it is the index within
+	// quantiles/buckets.
+	fieldPos    int
+	fieldsDone  bool // true if no more fields of a Summary or (legacy) Histogram are to be processed.
+	redoClassic bool // true after parsing a native histogram if we need to parse it again as a classic histogram.
+
+	// state is marked by the entry we are processing. EntryInvalid implies
+	// that we have to decode the next MetricFamily.
+	state Entry
+
+	builder labels.ScratchBuilder // held here to reduce allocations when building Labels
+
+	mf *dto.MetricFamily
+
+	// Whether to also parse a classic histogram that is also present as a
+	// native histogram.
+	parseClassicHistograms bool
+
+	// The following are just shenanigans to satisfy the Parser interface.
+	metricBytes *bytes.Buffer // A somewhat fluid representation of the current metric.
+}
+
+// NewProtobufParser returns a parser for the payload in the byte slice.
+func NewProtobufParser(b []byte, parseClassicHistograms bool) Parser {
+	return &ProtobufParser{
+		in:                     b,
+		state:                  EntryInvalid,
+		mf:                     &dto.MetricFamily{},
+		metricBytes:            &bytes.Buffer{},
+		parseClassicHistograms: parseClassicHistograms,
+	}
+}
+
+// Series returns the bytes of a series with a simple float64 as a
+// value, the timestamp if set, and the value of the current sample.
+func (p *ProtobufParser) Series() ([]byte, *int64, float64) {
+	var (
+		m  = p.mf.GetMetric()[p.metricPos]
+		ts = m.GetTimestampMs()
+		v  float64
+	)
+	switch p.mf.GetType() {
+	case dto.MetricType_COUNTER:
+		v = m.GetCounter().GetValue()
+	case dto.MetricType_GAUGE:
+		v = m.GetGauge().GetValue()
+	case dto.MetricType_UNTYPED:
+		v = m.GetUntyped().GetValue()
+	case dto.MetricType_SUMMARY:
+		s := m.GetSummary()
+		switch p.fieldPos {
+		case -2:
+			v = float64(s.GetSampleCount())
+		case -1:
+			v = s.GetSampleSum()
+			// Need to detect summaries without quantiles here.
+ if len(s.GetQuantile()) == 0 { + p.fieldsDone = true + } + default: + v = s.GetQuantile()[p.fieldPos].GetValue() + } + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: + // This should only happen for a classic histogram. + h := m.GetHistogram() + switch p.fieldPos { + case -2: + v = h.GetSampleCountFloat() + if v == 0 { + v = float64(h.GetSampleCount()) + } + case -1: + v = h.GetSampleSum() + default: + bb := h.GetBucket() + if p.fieldPos >= len(bb) { + v = h.GetSampleCountFloat() + if v == 0 { + v = float64(h.GetSampleCount()) + } + } else { + v = bb[p.fieldPos].GetCumulativeCountFloat() + if v == 0 { + v = float64(bb[p.fieldPos].GetCumulativeCount()) + } + } + } + default: + panic("encountered unexpected metric type, this is a bug") + } + if ts != 0 { + return p.metricBytes.Bytes(), &ts, v + } + // Nasty hack: Assume that ts==0 means no timestamp. That's not true in + // general, but proto3 has no distinction between unset and + // default. Need to avoid in the final format. + return p.metricBytes.Bytes(), nil, v +} + +// Histogram returns the bytes of a series with a native histogram as a value, +// the timestamp if set, and the native histogram in the current sample. +// +// The Compact method is called before returning the Histogram (or FloatHistogram). +// +// If the SampleCountFloat or the ZeroCountFloat in the proto message is > 0, +// the histogram is parsed and returned as a FloatHistogram and nil is returned +// as the (integer) Histogram return value. Otherwise, it is parsed and returned +// as an (integer) Histogram and nil is returned as the FloatHistogram return +// value. +func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { + var ( + m = p.mf.GetMetric()[p.metricPos] + ts = m.GetTimestampMs() + h = m.GetHistogram() + ) + if p.parseClassicHistograms && len(h.GetBucket()) > 0 { + p.redoClassic = true + } + if h.GetSampleCountFloat() > 0 || h.GetZeroCountFloat() > 0 { + // It is a float histogram. + fh := histogram.FloatHistogram{ + Count: h.GetSampleCountFloat(), + Sum: h.GetSampleSum(), + ZeroThreshold: h.GetZeroThreshold(), + ZeroCount: h.GetZeroCountFloat(), + Schema: h.GetSchema(), + PositiveSpans: make([]histogram.Span, len(h.GetPositiveSpan())), + PositiveBuckets: h.GetPositiveCount(), + NegativeSpans: make([]histogram.Span, len(h.GetNegativeSpan())), + NegativeBuckets: h.GetNegativeCount(), + } + for i, span := range h.GetPositiveSpan() { + fh.PositiveSpans[i].Offset = span.GetOffset() + fh.PositiveSpans[i].Length = span.GetLength() + } + for i, span := range h.GetNegativeSpan() { + fh.NegativeSpans[i].Offset = span.GetOffset() + fh.NegativeSpans[i].Length = span.GetLength() + } + if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM { + fh.CounterResetHint = histogram.GaugeType + } + fh.Compact(0) + if ts != 0 { + return p.metricBytes.Bytes(), &ts, nil, &fh + } + // Nasty hack: Assume that ts==0 means no timestamp. That's not true in + // general, but proto3 has no distinction between unset and + // default. Need to avoid in the final format. 
+ return p.metricBytes.Bytes(), nil, nil, &fh + } + + sh := histogram.Histogram{ + Count: h.GetSampleCount(), + Sum: h.GetSampleSum(), + ZeroThreshold: h.GetZeroThreshold(), + ZeroCount: h.GetZeroCount(), + Schema: h.GetSchema(), + PositiveSpans: make([]histogram.Span, len(h.GetPositiveSpan())), + PositiveBuckets: h.GetPositiveDelta(), + NegativeSpans: make([]histogram.Span, len(h.GetNegativeSpan())), + NegativeBuckets: h.GetNegativeDelta(), + } + for i, span := range h.GetPositiveSpan() { + sh.PositiveSpans[i].Offset = span.GetOffset() + sh.PositiveSpans[i].Length = span.GetLength() + } + for i, span := range h.GetNegativeSpan() { + sh.NegativeSpans[i].Offset = span.GetOffset() + sh.NegativeSpans[i].Length = span.GetLength() + } + if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM { + sh.CounterResetHint = histogram.GaugeType + } + sh.Compact(0) + if ts != 0 { + return p.metricBytes.Bytes(), &ts, &sh, nil + } + return p.metricBytes.Bytes(), nil, &sh, nil +} + +// Help returns the metric name and help text in the current entry. +// Must only be called after Next returned a help entry. +// The returned byte slices become invalid after the next call to Next. +func (p *ProtobufParser) Help() ([]byte, []byte) { + return p.metricBytes.Bytes(), []byte(p.mf.GetHelp()) +} + +// Type returns the metric name and type in the current entry. +// Must only be called after Next returned a type entry. +// The returned byte slices become invalid after the next call to Next. +func (p *ProtobufParser) Type() ([]byte, MetricType) { + n := p.metricBytes.Bytes() + switch p.mf.GetType() { + case dto.MetricType_COUNTER: + return n, MetricTypeCounter + case dto.MetricType_GAUGE: + return n, MetricTypeGauge + case dto.MetricType_HISTOGRAM: + return n, MetricTypeHistogram + case dto.MetricType_GAUGE_HISTOGRAM: + return n, MetricTypeGaugeHistogram + case dto.MetricType_SUMMARY: + return n, MetricTypeSummary + } + return n, MetricTypeUnknown +} + +// Unit always returns (nil, nil) because units aren't supported by the protobuf +// format. +func (p *ProtobufParser) Unit() ([]byte, []byte) { + return nil, nil +} + +// Comment always returns nil because comments aren't supported by the protobuf +// format. +func (p *ProtobufParser) Comment() []byte { + return nil +} + +// Metric writes the labels of the current sample into the passed labels. +// It returns the string from which the metric was parsed. +func (p *ProtobufParser) Metric(l *labels.Labels) string { + p.builder.Reset() + p.builder.Add(labels.MetricName, p.getMagicName()) + + for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() { + p.builder.Add(lp.GetName(), lp.GetValue()) + } + if needed, name, value := p.getMagicLabel(); needed { + p.builder.Add(name, value) + } + + // Sort labels to maintain the sorted labels invariant. + p.builder.Sort() + *l = p.builder.Labels() + + return p.metricBytes.String() +} + +// Exemplar writes the exemplar of the current sample into the passed +// exemplar. It returns if an exemplar exists or not. In case of a native +// histogram, the legacy bucket section is still used for exemplars. To ingest +// all examplars, call the Exemplar method repeatedly until it returns false. 
+func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { + m := p.mf.GetMetric()[p.metricPos] + var exProto *dto.Exemplar + switch p.mf.GetType() { + case dto.MetricType_COUNTER: + exProto = m.GetCounter().GetExemplar() + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: + bb := m.GetHistogram().GetBucket() + if p.fieldPos < 0 { + if p.state == EntrySeries { + return false // At _count or _sum. + } + p.fieldPos = 0 // Start at 1st bucket for native histograms. + } + for p.fieldPos < len(bb) { + exProto = bb[p.fieldPos].GetExemplar() + if p.state == EntrySeries { + break + } + p.fieldPos++ + if exProto != nil { + break + } + } + default: + return false + } + if exProto == nil { + return false + } + ex.Value = exProto.GetValue() + if ts := exProto.GetTimestamp(); ts != nil { + ex.HasTs = true + ex.Ts = ts.GetSeconds()*1000 + int64(ts.GetNanos()/1_000_000) + } + p.builder.Reset() + for _, lp := range exProto.GetLabel() { + p.builder.Add(lp.GetName(), lp.GetValue()) + } + p.builder.Sort() + ex.Labels = p.builder.Labels() + return true +} + +// Next advances the parser to the next "sample" (emulating the behavior of a +// text format parser). It returns (EntryInvalid, io.EOF) if no samples were +// read. +func (p *ProtobufParser) Next() (Entry, error) { + switch p.state { + case EntryInvalid: + p.metricPos = 0 + p.fieldPos = -2 + n, err := readDelimited(p.in[p.inPos:], p.mf) + p.inPos += n + if err != nil { + return p.state, err + } + + // Skip empty metric families. + if len(p.mf.GetMetric()) == 0 { + return p.Next() + } + + // We are at the beginning of a metric family. Put only the name + // into metricBytes and validate only name, help, and type for now. + name := p.mf.GetName() + if !model.IsValidMetricName(model.LabelValue(name)) { + return EntryInvalid, errors.Errorf("invalid metric name: %s", name) + } + if help := p.mf.GetHelp(); !utf8.ValidString(help) { + return EntryInvalid, errors.Errorf("invalid help for metric %q: %s", name, help) + } + switch p.mf.GetType() { + case dto.MetricType_COUNTER, + dto.MetricType_GAUGE, + dto.MetricType_HISTOGRAM, + dto.MetricType_GAUGE_HISTOGRAM, + dto.MetricType_SUMMARY, + dto.MetricType_UNTYPED: + // All good. 
+		default:
+			return EntryInvalid, errors.Errorf("unknown metric type for metric %q: %s", name, p.mf.GetType())
+		}
+		p.metricBytes.Reset()
+		p.metricBytes.WriteString(name)
+
+		p.state = EntryHelp
+	case EntryHelp:
+		p.state = EntryType
+	case EntryType:
+		t := p.mf.GetType()
+		if (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) &&
+			isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) {
+			p.state = EntryHistogram
+		} else {
+			p.state = EntrySeries
+		}
+		if err := p.updateMetricBytes(); err != nil {
+			return EntryInvalid, err
+		}
+	case EntryHistogram, EntrySeries:
+		if p.redoClassic {
+			p.redoClassic = false
+			p.state = EntrySeries
+			p.fieldPos = -3
+			p.fieldsDone = false
+		}
+		t := p.mf.GetType()
+		if p.state == EntrySeries && !p.fieldsDone &&
+			(t == dto.MetricType_SUMMARY ||
+				t == dto.MetricType_HISTOGRAM ||
+				t == dto.MetricType_GAUGE_HISTOGRAM) {
+			p.fieldPos++
+		} else {
+			p.metricPos++
+			p.fieldPos = -2
+			p.fieldsDone = false
+		}
+		if p.metricPos >= len(p.mf.GetMetric()) {
+			p.state = EntryInvalid
+			return p.Next()
+		}
+		if err := p.updateMetricBytes(); err != nil {
+			return EntryInvalid, err
+		}
+	default:
+		return EntryInvalid, errors.Errorf("invalid protobuf parsing state: %d", p.state)
+	}
+	return p.state, nil
+}
+
+func (p *ProtobufParser) updateMetricBytes() error {
+	b := p.metricBytes
+	b.Reset()
+	b.WriteString(p.getMagicName())
+	for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() {
+		b.WriteByte(model.SeparatorByte)
+		n := lp.GetName()
+		if !model.LabelName(n).IsValid() {
+			return errors.Errorf("invalid label name: %s", n)
+		}
+		b.WriteString(n)
+		b.WriteByte(model.SeparatorByte)
+		v := lp.GetValue()
+		if !utf8.ValidString(v) {
+			return errors.Errorf("invalid label value: %s", v)
+		}
+		b.WriteString(v)
+	}
+	if needed, n, v := p.getMagicLabel(); needed {
+		b.WriteByte(model.SeparatorByte)
+		b.WriteString(n)
+		b.WriteByte(model.SeparatorByte)
+		b.WriteString(v)
+	}
+	return nil
+}
+
+// getMagicName usually just returns p.mf.GetName() but adds a magic suffix
+// ("_count", "_sum", "_bucket") if needed according to the current parser
+// state.
+func (p *ProtobufParser) getMagicName() string {
+	t := p.mf.GetType()
+	if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_GAUGE_HISTOGRAM && t != dto.MetricType_SUMMARY) {
+		return p.mf.GetName()
+	}
+	if p.fieldPos == -2 {
+		return p.mf.GetName() + "_count"
+	}
+	if p.fieldPos == -1 {
+		return p.mf.GetName() + "_sum"
+	}
+	if t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM {
+		return p.mf.GetName() + "_bucket"
+	}
+	return p.mf.GetName()
+}
+
+// getMagicLabel returns whether a magic label ("quantile" or "le") is needed
+// and, if so, its name and value. It also sets p.fieldsDone if applicable.
+func (p *ProtobufParser) getMagicLabel() (bool, string, string) {
+	if p.state == EntryHistogram || p.fieldPos < 0 {
+		return false, "", ""
+	}
+	switch p.mf.GetType() {
+	case dto.MetricType_SUMMARY:
+		qq := p.mf.GetMetric()[p.metricPos].GetSummary().GetQuantile()
+		q := qq[p.fieldPos]
+		p.fieldsDone = p.fieldPos == len(qq)-1
+		return true, model.QuantileLabel, formatOpenMetricsFloat(q.GetQuantile())
+	case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
+		bb := p.mf.GetMetric()[p.metricPos].GetHistogram().GetBucket()
+		if p.fieldPos >= len(bb) {
+			p.fieldsDone = true
+			return true, model.BucketLabel, "+Inf"
+		}
+		b := bb[p.fieldPos]
+		p.fieldsDone = math.IsInf(b.GetUpperBound(), +1)
+		return true, model.BucketLabel, formatOpenMetricsFloat(b.GetUpperBound())
+	}
+	return false, "", ""
+}
+
+var errInvalidVarint = errors.New("protobufparse: invalid varint encountered")
+
+// readDelimited is essentially doing what the function of the same name in
+// github.com/matttproud/golang_protobuf_extensions/pbutil is doing, but it is
+// specific to a MetricFamily, utilizes the more efficient gogo-protobuf
+// unmarshaling, and acts on a byte slice directly without any additional
+// staging buffers.
+func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) {
+	if len(b) == 0 {
+		return 0, io.EOF
+	}
+	messageLength, varIntLength := proto.DecodeVarint(b)
+	if varIntLength == 0 || varIntLength > binary.MaxVarintLen32 {
+		return 0, errInvalidVarint
+	}
+	totalLength := varIntLength + int(messageLength)
+	if totalLength > len(b) {
+		return 0, errors.Errorf("protobufparse: insufficient length of buffer, expected at least %d bytes, got %d bytes", totalLength, len(b))
+	}
+	mf.Reset()
+	return totalLength, mf.Unmarshal(b[varIntLength:totalLength])
+}
+
+// formatOpenMetricsFloat works like the usual Go string formatting of a float
+// but appends ".0" if the resulting number would otherwise contain neither a
+// "." nor an "e".
+func formatOpenMetricsFloat(f float64) string {
+	// A few common cases hardcoded.
+	switch {
+	case f == 1:
+		return "1.0"
+	case f == 0:
+		return "0.0"
+	case f == -1:
+		return "-1.0"
+	case math.IsNaN(f):
+		return "NaN"
+	case math.IsInf(f, +1):
+		return "+Inf"
+	case math.IsInf(f, -1):
+		return "-Inf"
+	}
+	s := fmt.Sprint(f)
+	if strings.ContainsAny(s, "e.") {
+		return s
+	}
+	return s + ".0"
+}
+
+// isNativeHistogram returns false iff the provided histogram has no sparse
+// buckets, a zero threshold of 0, and a zero count of 0. In principle, this
+// could still be meant to be a native histogram (with a zero threshold of 0 and
+// no observations yet), but for now, we'll treat this case as a conventional
+// histogram.
+//
+// TODO(beorn7): In the final format, there should be an unambiguous way of
+// deciding if a histogram should be ingested as a conventional one or a native
+// one.
+func isNativeHistogram(h *dto.Histogram) bool {
+	return len(h.GetNegativeDelta()) > 0 ||
+		len(h.GetPositiveDelta()) > 0 ||
+		h.GetZeroCount() > 0 ||
+		h.GetZeroThreshold() > 0
+}
diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go
new file mode 100644
index 0000000000..882cce59d3
--- /dev/null
+++ b/model/textparse/protobufparse_test.go
@@ -0,0 +1,1485 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package textparse + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "testing" + + "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + + dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" +) + +func createTestProtoBuf(t *testing.T) *bytes.Buffer { + testMetricFamilies := []string{ + `name: "go_build_info" +help: "Build information about the main Go module." +type: GAUGE +metric: < + label: < + name: "checksum" + value: "" + > + label: < + name: "path" + value: "github.com/prometheus/client_golang" + > + label: < + name: "version" + value: "(devel)" + > + gauge: < + value: 1 + > +> + +`, + `name: "go_memstats_alloc_bytes_total" +help: "Total number of bytes allocated, even if freed." +type: COUNTER +metric: < + counter: < + value: 1.546544e+06 + exemplar: < + label: < + name: "dummyID" + value: "42" + > + value: 12 + timestamp: < + seconds: 1625851151 + nanos: 233181499 + > + > + > +> + +`, + `name: "something_untyped" +help: "Just to test the untyped type." +type: UNTYPED +metric: < + untyped: < + value: 42 + > + timestamp_ms: 1234567 +> + +`, + `name: "test_histogram" +help: "Test histogram with many buckets removed to keep it manageable in size." +type: HISTOGRAM +metric: < + histogram: < + sample_count: 175 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count: 2 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.0003899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "59727" + > + value: -0.00039 + timestamp: < + seconds: 1625851155 + nanos: 146848499 + > + > + > + bucket: < + cumulative_count: 16 + upper_bound: -0.0002899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "5617" + > + value: -0.00029 + > + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count: 2 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_delta: 1 + negative_delta: 3 + negative_delta: -2 + negative_delta: -1 + negative_delta: 1 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_delta: 1 + positive_delta: 2 + positive_delta: -1 + positive_delta: -1 + > + timestamp_ms: 1234568 +> + +`, + `name: "test_gauge_histogram" +help: "Like test_histogram but as gauge histogram." 
+type: GAUGE_HISTOGRAM +metric: < + histogram: < + sample_count: 175 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count: 2 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.0003899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "59727" + > + value: -0.00039 + timestamp: < + seconds: 1625851155 + nanos: 146848499 + > + > + > + bucket: < + cumulative_count: 16 + upper_bound: -0.0002899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "5617" + > + value: -0.00029 + > + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count: 2 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_delta: 1 + negative_delta: 3 + negative_delta: -2 + negative_delta: -1 + negative_delta: 1 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_delta: 1 + positive_delta: 2 + positive_delta: -1 + positive_delta: -1 + > + timestamp_ms: 1234568 +> + +`, + `name: "test_float_histogram" +help: "Test float histogram with many buckets removed to keep it manageable in size." +type: HISTOGRAM +metric: < + histogram: < + sample_count_float: 175.0 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count_float: 2.0 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count_float: 4.0 + upper_bound: -0.0003899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "59727" + > + value: -0.00039 + timestamp: < + seconds: 1625851155 + nanos: 146848499 + > + > + > + bucket: < + cumulative_count_float: 16 + upper_bound: -0.0002899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "5617" + > + value: -0.00029 + > + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count_float: 2.0 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_count: 1.0 + negative_count: 3.0 + negative_count: -2.0 + negative_count: -1.0 + negative_count: 1.0 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_count: 1.0 + positive_count: 2.0 + positive_count: -1.0 + positive_count: -1.0 + > + timestamp_ms: 1234568 +> + +`, + `name: "test_gauge_float_histogram" +help: "Like test_float_histogram but as gauge histogram." 
+type: GAUGE_HISTOGRAM +metric: < + histogram: < + sample_count_float: 175.0 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count_float: 2.0 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count_float: 4.0 + upper_bound: -0.0003899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "59727" + > + value: -0.00039 + timestamp: < + seconds: 1625851155 + nanos: 146848499 + > + > + > + bucket: < + cumulative_count_float: 16 + upper_bound: -0.0002899999999999998 + exemplar: < + label: < + name: "dummyID" + value: "5617" + > + value: -0.00029 + > + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count_float: 2.0 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_count: 1.0 + negative_count: 3.0 + negative_count: -2.0 + negative_count: -1.0 + negative_count: 1.0 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_count: 1.0 + positive_count: 2.0 + positive_count: -1.0 + positive_count: -1.0 + > + timestamp_ms: 1234568 +> + +`, + `name: "test_histogram2" +help: "Similar histogram as before but now without sparse buckets." +type: HISTOGRAM +metric: < + histogram: < + sample_count: 175 + sample_sum: 0.000828 + bucket: < + cumulative_count: 2 + upper_bound: -0.00048 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.00038 + exemplar: < + label: < + name: "dummyID" + value: "59727" + > + value: -0.00038 + timestamp: < + seconds: 1625851153 + nanos: 146848499 + > + > + > + bucket: < + cumulative_count: 16 + upper_bound: 1 + exemplar: < + label: < + name: "dummyID" + value: "5617" + > + value: -0.000295 + > + > + schema: 0 + zero_threshold: 0 + > +> + +`, + `name: "rpc_durations_seconds" +help: "RPC latency distributions." +type: SUMMARY +metric: < + label: < + name: "service" + value: "exponential" + > + summary: < + sample_count: 262 + sample_sum: 0.00025551262820703587 + quantile: < + quantile: 0.5 + value: 6.442786329648548e-07 + > + quantile: < + quantile: 0.9 + value: 1.9435742936658396e-06 + > + quantile: < + quantile: 0.99 + value: 4.0471608667037015e-06 + > + > +> +`, + `name: "without_quantiles" +help: "A summary without quantiles." +type: SUMMARY +metric: < + summary: < + sample_count: 42 + sample_sum: 1.234 + > +> +`, + } + + varintBuf := make([]byte, binary.MaxVarintLen32) + buf := &bytes.Buffer{} + + for _, tmf := range testMetricFamilies { + pb := &dto.MetricFamily{} + // From text to proto message. + require.NoError(t, proto.UnmarshalText(tmf, pb)) + // From proto message to binary protobuf. + protoBuf, err := proto.Marshal(pb) + require.NoError(t, err) + + // Write first length, then binary protobuf. 
+ varintLength := binary.PutUvarint(varintBuf, uint64(len(protoBuf))) + buf.Write(varintBuf[:varintLength]) + buf.Write(protoBuf) + } + + return buf +} + +func TestProtobufParse(t *testing.T) { + type parseResult struct { + lset labels.Labels + m string + t int64 + v float64 + typ MetricType + help string + unit string + comment string + shs *histogram.Histogram + fhs *histogram.FloatHistogram + e []exemplar.Exemplar + } + + inputBuf := createTestProtoBuf(t) + + scenarios := []struct { + name string + parser Parser + expected []parseResult + }{ + { + name: "ignore classic buckets of native histograms", + parser: NewProtobufParser(inputBuf.Bytes(), false), + expected: []parseResult{ + { + m: "go_build_info", + help: "Build information about the main Go module.", + }, + { + m: "go_build_info", + typ: MetricTypeGauge, + }, + { + m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)", + v: 1, + lset: labels.FromStrings( + "__name__", "go_build_info", + "checksum", "", + "path", "github.com/prometheus/client_golang", + "version", "(devel)", + ), + }, + { + m: "go_memstats_alloc_bytes_total", + help: "Total number of bytes allocated, even if freed.", + }, + { + m: "go_memstats_alloc_bytes_total", + typ: MetricTypeCounter, + }, + { + m: "go_memstats_alloc_bytes_total", + v: 1.546544e+06, + lset: labels.FromStrings( + "__name__", "go_memstats_alloc_bytes_total", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "42"), Value: 12, HasTs: true, Ts: 1625851151233}, + }, + }, + { + m: "something_untyped", + help: "Just to test the untyped type.", + }, + { + m: "something_untyped", + typ: MetricTypeUnknown, + }, + { + m: "something_untyped", + t: 1234567, + v: 42, + lset: labels.FromStrings( + "__name__", "something_untyped", + ), + }, + { + m: "test_histogram", + help: "Test histogram with many buckets removed to keep it manageable in size.", + }, + { + m: "test_histogram", + typ: MetricTypeHistogram, + }, + { + m: "test_histogram", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { + m: "test_gauge_histogram", + help: "Like test_histogram but as gauge histogram.", + }, + { + m: "test_gauge_histogram", + typ: MetricTypeGaugeHistogram, + }, + { + m: "test_gauge_histogram", + t: 1234568, + shs: &histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: 
labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { + m: "test_float_histogram", + help: "Test float histogram with many buckets removed to keep it manageable in size.", + }, + { + m: "test_float_histogram", + typ: MetricTypeHistogram, + }, + { + m: "test_float_histogram", + t: 1234568, + fhs: &histogram.FloatHistogram{ + Count: 175.0, + ZeroCount: 2.0, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0}, + NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0}, + }, + lset: labels.FromStrings( + "__name__", "test_float_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { + m: "test_gauge_float_histogram", + help: "Like test_float_histogram but as gauge histogram.", + }, + { + m: "test_gauge_float_histogram", + typ: MetricTypeGaugeHistogram, + }, + { + m: "test_gauge_float_histogram", + t: 1234568, + fhs: &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Count: 175.0, + ZeroCount: 2.0, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0}, + NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0}, + }, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { + m: "test_histogram2", + help: "Similar histogram as before but now without sparse buckets.", + }, + { + m: "test_histogram2", + typ: MetricTypeHistogram, + }, + { + m: "test_histogram2_count", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram2_count", + ), + }, + { + m: "test_histogram2_sum", + v: 0.000828, + lset: labels.FromStrings( + "__name__", "test_histogram2_sum", + ), + }, + { + m: "test_histogram2_bucket\xffle\xff-0.00048", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "-0.00048", + ), + }, + { + m: "test_histogram2_bucket\xffle\xff-0.00038", + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "-0.00038", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146}, + }, + }, + { + m: "test_histogram2_bucket\xffle\xff1.0", + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "1.0", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false}, + }, + }, + { + m: "test_histogram2_bucket\xffle\xff+Inf", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "+Inf", + ), + }, + { + m: "rpc_durations_seconds", + help: "RPC latency distributions.", + }, + { + 
m: "rpc_durations_seconds", + typ: MetricTypeSummary, + }, + { + m: "rpc_durations_seconds_count\xffservice\xffexponential", + v: 262, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds_count", + "service", "exponential", + ), + }, + { + m: "rpc_durations_seconds_sum\xffservice\xffexponential", + v: 0.00025551262820703587, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds_sum", + "service", "exponential", + ), + }, + { + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.5", + v: 6.442786329648548e-07, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.5", + "service", "exponential", + ), + }, + { + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.9", + v: 1.9435742936658396e-06, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.9", + "service", "exponential", + ), + }, + { + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.99", + v: 4.0471608667037015e-06, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.99", + "service", "exponential", + ), + }, + { + m: "without_quantiles", + help: "A summary without quantiles.", + }, + { + m: "without_quantiles", + typ: MetricTypeSummary, + }, + { + m: "without_quantiles_count", + v: 42, + lset: labels.FromStrings( + "__name__", "without_quantiles_count", + ), + }, + { + m: "without_quantiles_sum", + v: 1.234, + lset: labels.FromStrings( + "__name__", "without_quantiles_sum", + ), + }, + }, + }, + { + name: "parse classic and native buckets", + parser: NewProtobufParser(inputBuf.Bytes(), true), + expected: []parseResult{ + { // 0 + m: "go_build_info", + help: "Build information about the main Go module.", + }, + { // 1 + m: "go_build_info", + typ: MetricTypeGauge, + }, + { // 2 + m: "go_build_info\xFFchecksum\xFF\xFFpath\xFFgithub.com/prometheus/client_golang\xFFversion\xFF(devel)", + v: 1, + lset: labels.FromStrings( + "__name__", "go_build_info", + "checksum", "", + "path", "github.com/prometheus/client_golang", + "version", "(devel)", + ), + }, + { // 3 + m: "go_memstats_alloc_bytes_total", + help: "Total number of bytes allocated, even if freed.", + }, + { // 4 + m: "go_memstats_alloc_bytes_total", + typ: MetricTypeCounter, + }, + { // 5 + m: "go_memstats_alloc_bytes_total", + v: 1.546544e+06, + lset: labels.FromStrings( + "__name__", "go_memstats_alloc_bytes_total", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "42"), Value: 12, HasTs: true, Ts: 1625851151233}, + }, + }, + { // 6 + m: "something_untyped", + help: "Just to test the untyped type.", + }, + { // 7 + m: "something_untyped", + typ: MetricTypeUnknown, + }, + { // 8 + m: "something_untyped", + t: 1234567, + v: 42, + lset: labels.FromStrings( + "__name__", "something_untyped", + ), + }, + { // 9 + m: "test_histogram", + help: "Test histogram with many buckets removed to keep it manageable in size.", + }, + { // 10 + m: "test_histogram", + typ: MetricTypeHistogram, + }, + { // 11 + m: "test_histogram", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", 
"test_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 12 + m: "test_histogram_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram_count", + ), + }, + { // 13 + m: "test_histogram_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_histogram_sum", + ), + }, + { // 14 + m: "test_histogram_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 15 + m: "test_histogram_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram_bucket", + "le", "-0.0003899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 16 + m: "test_histogram_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram_bucket", + "le", "-0.0002899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 17 + m: "test_histogram_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram_bucket", + "le", "+Inf", + ), + }, + { // 18 + m: "test_gauge_histogram", + help: "Like test_histogram but as gauge histogram.", + }, + { // 19 + m: "test_gauge_histogram", + typ: MetricTypeGaugeHistogram, + }, + { // 20 + m: "test_gauge_histogram", + t: 1234568, + shs: &histogram.Histogram{ + CounterResetHint: histogram.GaugeType, + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 21 + m: "test_gauge_histogram_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_count", + ), + }, + { // 22 + m: "test_gauge_histogram_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_sum", + ), + }, + { // 23 + m: "test_gauge_histogram_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 24 + m: "test_gauge_histogram_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_bucket", + "le", "-0.0003899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 25 + m: "test_gauge_histogram_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: 
labels.FromStrings( + "__name__", "test_gauge_histogram_bucket", + "le", "-0.0002899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 26 + m: "test_gauge_histogram_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_gauge_histogram_bucket", + "le", "+Inf", + ), + }, + { // 27 + m: "test_float_histogram", + help: "Test float histogram with many buckets removed to keep it manageable in size.", + }, + { // 28 + m: "test_float_histogram", + typ: MetricTypeHistogram, + }, + { // 29 + m: "test_float_histogram", + t: 1234568, + fhs: &histogram.FloatHistogram{ + Count: 175.0, + ZeroCount: 2.0, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0}, + NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0}, + }, + lset: labels.FromStrings( + "__name__", "test_float_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 30 + m: "test_float_histogram_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_float_histogram_count", + ), + }, + { // 31 + m: "test_float_histogram_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_float_histogram_sum", + ), + }, + { // 32 + m: "test_float_histogram_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_float_histogram_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 33 + m: "test_float_histogram_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: labels.FromStrings( + "__name__", "test_float_histogram_bucket", + "le", "-0.0003899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 34 + m: "test_float_histogram_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", "test_float_histogram_bucket", + "le", "-0.0002899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 35 + m: "test_float_histogram_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_float_histogram_bucket", + "le", "+Inf", + ), + }, + { // 36 + m: "test_gauge_float_histogram", + help: "Like test_float_histogram but as gauge histogram.", + }, + { // 37 + m: "test_gauge_float_histogram", + typ: MetricTypeGaugeHistogram, + }, + { // 38 + m: "test_gauge_float_histogram", + t: 1234568, + fhs: &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Count: 175.0, + ZeroCount: 2.0, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []float64{1.0, 2.0, -1.0, -1.0}, + NegativeBuckets: []float64{1.0, 3.0, -2.0, -1.0, 1.0}, + }, 
+ lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 39 + m: "test_gauge_float_histogram_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_count", + ), + }, + { // 40 + m: "test_gauge_float_histogram_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_sum", + ), + }, + { // 41 + m: "test_gauge_float_histogram_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 42 + m: "test_gauge_float_histogram_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_bucket", + "le", "-0.0003899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 43 + m: "test_gauge_float_histogram_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_bucket", + "le", "-0.0002899999999999998", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, + }, + }, + { // 44 + m: "test_gauge_float_histogram_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_gauge_float_histogram_bucket", + "le", "+Inf", + ), + }, + { // 45 + m: "test_histogram2", + help: "Similar histogram as before but now without sparse buckets.", + }, + { // 46 + m: "test_histogram2", + typ: MetricTypeHistogram, + }, + { // 47 + m: "test_histogram2_count", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram2_count", + ), + }, + { // 48 + m: "test_histogram2_sum", + v: 0.000828, + lset: labels.FromStrings( + "__name__", "test_histogram2_sum", + ), + }, + { // 49 + m: "test_histogram2_bucket\xffle\xff-0.00048", + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "-0.00048", + ), + }, + { // 50 + m: "test_histogram2_bucket\xffle\xff-0.00038", + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "-0.00038", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00038, HasTs: true, Ts: 1625851153146}, + }, + }, + { // 51 + m: "test_histogram2_bucket\xffle\xff1.0", + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "1.0", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.000295, HasTs: false}, + }, + }, + { // 52 + m: "test_histogram2_bucket\xffle\xff+Inf", + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram2_bucket", + "le", "+Inf", + ), + }, + { // 53 + m: "rpc_durations_seconds", + help: "RPC latency distributions.", + }, + { // 54 + m: "rpc_durations_seconds", + typ: MetricTypeSummary, + }, + { // 55 + m: "rpc_durations_seconds_count\xffservice\xffexponential", + v: 262, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds_count", + "service", "exponential", + ), + }, + { // 56 + m: "rpc_durations_seconds_sum\xffservice\xffexponential", + v: 0.00025551262820703587, + lset: 
labels.FromStrings( + "__name__", "rpc_durations_seconds_sum", + "service", "exponential", + ), + }, + { // 57 + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.5", + v: 6.442786329648548e-07, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.5", + "service", "exponential", + ), + }, + { // 58 + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.9", + v: 1.9435742936658396e-06, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.9", + "service", "exponential", + ), + }, + { // 59 + m: "rpc_durations_seconds\xffservice\xffexponential\xffquantile\xff0.99", + v: 4.0471608667037015e-06, + lset: labels.FromStrings( + "__name__", "rpc_durations_seconds", + "quantile", "0.99", + "service", "exponential", + ), + }, + { // 60 + m: "without_quantiles", + help: "A summary without quantiles.", + }, + { // 61 + m: "without_quantiles", + typ: MetricTypeSummary, + }, + { // 62 + m: "without_quantiles_count", + v: 42, + lset: labels.FromStrings( + "__name__", "without_quantiles_count", + ), + }, + { // 63 + m: "without_quantiles_sum", + v: 1.234, + lset: labels.FromStrings( + "__name__", "without_quantiles_sum", + ), + }, + }, + }, + } + + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + var ( + i int + res labels.Labels + p = scenario.parser + exp = scenario.expected + ) + + for { + et, err := p.Next() + if errors.Is(err, io.EOF) { + break + } + require.NoError(t, err) + + switch et { + case EntrySeries: + m, ts, v := p.Series() + + var e exemplar.Exemplar + p.Metric(&res) + found := p.Exemplar(&e) + require.Equal(t, exp[i].m, string(m)) + if ts != nil { + require.Equal(t, exp[i].t, *ts) + } else { + require.Equal(t, exp[i].t, int64(0)) + } + require.Equal(t, exp[i].v, v) + require.Equal(t, exp[i].lset, res) + if len(exp[i].e) == 0 { + require.Equal(t, false, found) + } else { + require.Equal(t, true, found) + require.Equal(t, exp[i].e[0], e) + } + + case EntryHistogram: + m, ts, shs, fhs := p.Histogram() + p.Metric(&res) + require.Equal(t, exp[i].m, string(m)) + if ts != nil { + require.Equal(t, exp[i].t, *ts) + } else { + require.Equal(t, exp[i].t, int64(0)) + } + require.Equal(t, exp[i].lset, res) + require.Equal(t, exp[i].m, string(m)) + if shs != nil { + require.Equal(t, exp[i].shs, shs) + } else { + require.Equal(t, exp[i].fhs, fhs) + } + j := 0 + for e := (exemplar.Exemplar{}); p.Exemplar(&e); j++ { + require.Equal(t, exp[i].e[j], e) + e = exemplar.Exemplar{} + } + require.Equal(t, len(exp[i].e), j, "not enough exemplars found") + + case EntryType: + m, typ := p.Type() + require.Equal(t, exp[i].m, string(m)) + require.Equal(t, exp[i].typ, typ) + + case EntryHelp: + m, h := p.Help() + require.Equal(t, exp[i].m, string(m)) + require.Equal(t, exp[i].help, string(h)) + + case EntryUnit: + m, u := p.Unit() + require.Equal(t, exp[i].m, string(m)) + require.Equal(t, exp[i].unit, string(u)) + + case EntryComment: + require.Equal(t, exp[i].comment, string(p.Comment())) + } + + i++ + } + require.Equal(t, len(exp), i) + }) + } +} diff --git a/notifier/notifier.go b/notifier/notifier.go index 99886bd4bc..891372c43e 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -19,11 +19,9 @@ import ( "encoding/json" "fmt" "io" - "net" "net/http" "net/url" "path" - "strings" "sync" "time" @@ -351,19 +349,6 @@ func (n *Manager) Send(alerts ...*Alert) { n.mtx.Lock() defer n.mtx.Unlock() - // Attach external labels before relabelling and sending. 
- for _, a := range alerts { - lb := labels.NewBuilder(a.Labels) - - for _, l := range n.opts.ExternalLabels { - if a.Labels.Get(l.Name) == "" { - lb.Set(l.Name, l.Value) - } - } - - a.Labels = lb.Labels(a.Labels) - } - alerts = n.relabelAlerts(alerts) if len(alerts) == 0 { return @@ -392,15 +377,25 @@ func (n *Manager) Send(alerts ...*Alert) { n.setMore() } +// Attach external labels and process relabelling rules. func (n *Manager) relabelAlerts(alerts []*Alert) []*Alert { + lb := labels.NewBuilder(labels.EmptyLabels()) var relabeledAlerts []*Alert - for _, alert := range alerts { - labels := relabel.Process(alert.Labels, n.opts.RelabelConfigs...) - if labels != nil { - alert.Labels = labels - relabeledAlerts = append(relabeledAlerts, alert) + for _, a := range alerts { + lb.Reset(a.Labels) + n.opts.ExternalLabels.Range(func(l labels.Label) { + if a.Labels.Get(l.Name) == "" { + lb.Set(l.Name, l.Value) + } + }) + + keep := relabel.ProcessBuilder(lb, n.opts.RelabelConfigs...) + if !keep { + continue } + a.Labels = lb.Labels() + relabeledAlerts = append(relabeledAlerts, a) } return relabeledAlerts } @@ -572,9 +567,9 @@ func alertsToOpenAPIAlerts(alerts []*Alert) models.PostableAlerts { func labelsToOpenAPILabelSet(modelLabelSet labels.Labels) models.LabelSet { apiLabelSet := models.LabelSet{} - for _, label := range modelLabelSet { + modelLabelSet.Range(func(label labels.Label) { apiLabelSet[label.Name] = label.Value - } + }) return apiLabelSet } @@ -703,72 +698,38 @@ func postPath(pre string, v config.AlertmanagerAPIVersion) string { func AlertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig) ([]alertmanager, []alertmanager, error) { var res []alertmanager var droppedAlertManagers []alertmanager + lb := labels.NewBuilder(labels.EmptyLabels()) for _, tlset := range tg.Targets { - lbls := make([]labels.Label, 0, len(tlset)+2+len(tg.Labels)) + lb.Reset(labels.EmptyLabels()) for ln, lv := range tlset { - lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)}) + lb.Set(string(ln), string(lv)) } // Set configured scheme as the initial scheme label for overwrite. - lbls = append(lbls, labels.Label{Name: model.SchemeLabel, Value: cfg.Scheme}) - lbls = append(lbls, labels.Label{Name: pathLabel, Value: postPath(cfg.PathPrefix, cfg.APIVersion)}) + lb.Set(model.SchemeLabel, cfg.Scheme) + lb.Set(pathLabel, postPath(cfg.PathPrefix, cfg.APIVersion)) // Combine target labels with target group labels. for ln, lv := range tg.Labels { if _, ok := tlset[ln]; !ok { - lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)}) + lb.Set(string(ln), string(lv)) } } - lset := relabel.Process(labels.New(lbls...), cfg.RelabelConfigs...) - if lset == nil { - droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{lbls}) + preRelabel := lb.Labels() + keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...) + if !keep { + droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{preRelabel}) continue } - lb := labels.NewBuilder(lset) - - // addPort checks whether we should add a default port to the address. - // If the address is not valid, we don't append a port either. - addPort := func(s string) bool { - // If we can split, a port exists and we don't have to add one. - if _, _, err := net.SplitHostPort(s); err == nil { - return false - } - // If adding a port makes it valid, the previous error - // was not due to an invalid address and we can append a port. 
- _, _, err := net.SplitHostPort(s + ":1234") - return err == nil - } - addr := lset.Get(model.AddressLabel) - // If it's an address with no trailing port, infer it based on the used scheme. - if addPort(addr) { - // Addresses reaching this point are already wrapped in [] if necessary. - switch lset.Get(model.SchemeLabel) { - case "http", "": - addr = addr + ":80" - case "https": - addr = addr + ":443" - default: - return nil, nil, fmt.Errorf("invalid scheme: %q", cfg.Scheme) - } - lb.Set(model.AddressLabel, addr) - } - + addr := lb.Get(model.AddressLabel) if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { return nil, nil, err } - // Meta labels are deleted after relabelling. Other internal labels propagate to - // the target which decides whether they will be part of their label set. - for _, l := range lset { - if strings.HasPrefix(l.Name, model.MetaLabelPrefix) { - lb.Del(l.Name) - } - } - - res = append(res, alertmanagerLabels{lset}) + res = append(res, alertmanagerLabels{lb.Labels()}) } return res, droppedAlertManagers, nil } diff --git a/plugins.yml b/plugins.yml index c83c806db6..c10dabddb6 100644 --- a/plugins.yml +++ b/plugins.yml @@ -13,6 +13,7 @@ - github.com/prometheus/prometheus/discovery/moby - github.com/prometheus/prometheus/discovery/nomad - github.com/prometheus/prometheus/discovery/openstack +- github.com/prometheus/prometheus/discovery/ovhcloud - github.com/prometheus/prometheus/discovery/puppetdb - github.com/prometheus/prometheus/discovery/scaleway - github.com/prometheus/prometheus/discovery/triton diff --git a/plugins/plugins.go b/plugins/plugins.go index 60d8d791b1..e6994e2068 100644 --- a/plugins/plugins.go +++ b/plugins/plugins.go @@ -61,6 +61,9 @@ import ( // Register openstack plugin. _ "github.com/prometheus/prometheus/discovery/openstack" + // Register ovhcloud plugin. + _ "github.com/prometheus/prometheus/discovery/ovhcloud" + // Register puppetdb plugin. _ "github.com/prometheus/prometheus/discovery/puppetdb" diff --git a/prompb/README.md b/prompb/README.md index 8c19b17e9d..a33d7bfb88 100644 --- a/prompb/README.md +++ b/prompb/README.md @@ -4,6 +4,6 @@ re-compile them when building Prometheus. If however you have modified the defs and do need to re-compile, run `make proto` from the parent dir. -In order for the script to run, you'll need `protoc` (version 3.12.3) in your -PATH. +In order for the [script](../scripts/genproto.sh) to run, you'll need `protoc` (version 3.15.8) in +your PATH. 
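
Editorial note: the notifier.go hunks above replace the old per-alert label-slice handling with a single reused `labels.Builder`: external labels are attached only where the alert does not already set them, and relabelling now runs directly on the builder via `relabel.ProcessBuilder`, which returns whether the alert is kept. A condensed sketch of that flow, using only the calls visible in the diff (`alerts`, `externalLabels`, and `relabelConfigs` stand in for the manager's fields):

```go
// Sketch of the new relabelAlerts flow; assumes the labels and relabel
// packages from prometheus/prometheus. Variable names are illustrative.
lb := labels.NewBuilder(labels.EmptyLabels())
var out []*Alert
for _, a := range alerts {
	lb.Reset(a.Labels) // one builder is reused across all alerts

	// External labels never override labels the alert already carries.
	externalLabels.Range(func(l labels.Label) {
		if a.Labels.Get(l.Name) == "" {
			lb.Set(l.Name, l.Value)
		}
	})

	// Relabelling mutates the builder in place; a false return drops the alert.
	if keep := relabel.ProcessBuilder(lb, relabelConfigs...); !keep {
		continue
	}
	a.Labels = lb.Labels()
	out = append(out, a)
}
```

Allocating the builder once outside the loop avoids the per-alert slice allocations of the previous `relabel.Process` path, which is the point of this refactor.
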
diff --git a/prompb/buf.yaml b/prompb/buf.yaml index acd7af9cf4..0f7ec13401 100644 --- a/prompb/buf.yaml +++ b/prompb/buf.yaml @@ -5,14 +5,17 @@ lint: ENUM_VALUE_PREFIX: - remote.proto - types.proto + - io/prometheus/client/metrics.proto ENUM_ZERO_VALUE_SUFFIX: - remote.proto - types.proto + - io/prometheus/client/metrics.proto PACKAGE_DIRECTORY_MATCH: - remote.proto - types.proto PACKAGE_VERSION_SUFFIX: - remote.proto - types.proto + - io/prometheus/client/metrics.proto deps: - buf.build/gogo/protobuf diff --git a/prompb/custom.go b/prompb/custom.go index 0b3820c4d2..13d6e0f0cd 100644 --- a/prompb/custom.go +++ b/prompb/custom.go @@ -13,5 +13,27 @@ package prompb +import ( + "sync" +) + func (m Sample) T() int64 { return m.Timestamp } func (m Sample) V() float64 { return m.Value } + +func (h Histogram) IsFloatHistogram() bool { + _, ok := h.GetCount().(*Histogram_CountFloat) + return ok +} + +func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) { + size := r.Size() + data, ok := p.Get().(*[]byte) + if ok && cap(*data) >= size { + n, err := r.MarshalToSizedBuffer((*data)[:size]) + if err != nil { + return nil, err + } + return (*data)[:n], nil + } + return r.Marshal() +} diff --git a/prompb/io/prometheus/client/metrics.pb.go b/prompb/io/prometheus/client/metrics.pb.go new file mode 100644 index 0000000000..3e4bc7df8f --- /dev/null +++ b/prompb/io/prometheus/client/metrics.pb.go @@ -0,0 +1,3997 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: io/prometheus/client/metrics.proto + +package io_prometheus_client + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + types "github.com/gogo/protobuf/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type MetricType int32 + +const ( + // COUNTER must use the Metric field "counter". + MetricType_COUNTER MetricType = 0 + // GAUGE must use the Metric field "gauge". + MetricType_GAUGE MetricType = 1 + // SUMMARY must use the Metric field "summary". + MetricType_SUMMARY MetricType = 2 + // UNTYPED must use the Metric field "untyped". + MetricType_UNTYPED MetricType = 3 + // HISTOGRAM must use the Metric field "histogram". + MetricType_HISTOGRAM MetricType = 4 + // GAUGE_HISTOGRAM must use the Metric field "histogram". 
+ MetricType_GAUGE_HISTOGRAM MetricType = 5 +) + +var MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", + 5: "GAUGE_HISTOGRAM", +} + +var MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, + "GAUGE_HISTOGRAM": 5, +} + +func (x MetricType) String() string { + return proto.EnumName(MetricType_name, int32(x)) +} + +func (MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{0} +} + +type LabelPair struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (m *LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage() {} +func (*LabelPair) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{0} +} +func (m *LabelPair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelPair.Merge(m, src) +} +func (m *LabelPair) XXX_Size() int { + return m.Size() +} +func (m *LabelPair) XXX_DiscardUnknown() { + xxx_messageInfo_LabelPair.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelPair proto.InternalMessageInfo + +func (m *LabelPair) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +type Gauge struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} +func (*Gauge) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{1} +} +func (m *Gauge) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Gauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gauge.Merge(m, src) +} +func (m *Gauge) XXX_Size() int { + return m.Size() +} +func (m *Gauge) XXX_DiscardUnknown() { + xxx_messageInfo_Gauge.DiscardUnknown(m) +} + +var xxx_messageInfo_Gauge proto.InternalMessageInfo + +func (m *Gauge) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +type Counter struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Counter) 
Reset() { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) ProtoMessage() {} +func (*Counter) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{2} +} +func (m *Counter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Counter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(m, src) +} +func (m *Counter) XXX_Size() int { + return m.Size() +} +func (m *Counter) XXX_DiscardUnknown() { + xxx_messageInfo_Counter.DiscardUnknown(m) +} + +var xxx_messageInfo_Counter proto.InternalMessageInfo + +func (m *Counter) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Counter) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +type Quantile struct { + Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"` + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Quantile) Reset() { *m = Quantile{} } +func (m *Quantile) String() string { return proto.CompactTextString(m) } +func (*Quantile) ProtoMessage() {} +func (*Quantile) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{3} +} +func (m *Quantile) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Quantile) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantile.Merge(m, src) +} +func (m *Quantile) XXX_Size() int { + return m.Size() +} +func (m *Quantile) XXX_DiscardUnknown() { + xxx_messageInfo_Quantile.DiscardUnknown(m) +} + +var xxx_messageInfo_Quantile proto.InternalMessageInfo + +func (m *Quantile) GetQuantile() float64 { + if m != nil { + return m.Quantile + } + return 0 +} + +func (m *Quantile) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +type Summary struct { + SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"` + SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"` + Quantile []Quantile `protobuf:"bytes,3,rep,name=quantile,proto3" json:"quantile"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} +func (*Summary) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{4} +} +func (m *Summary) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Summary.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Summary) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary.Merge(m, src) +} +func (m *Summary) XXX_Size() int { + return m.Size() +} +func (m *Summary) XXX_DiscardUnknown() { + xxx_messageInfo_Summary.DiscardUnknown(m) +} + +var xxx_messageInfo_Summary proto.InternalMessageInfo + +func (m *Summary) GetSampleCount() uint64 { + if m != nil { + return m.SampleCount + } + return 0 +} + +func (m *Summary) GetSampleSum() float64 { + if m != nil { + return m.SampleSum + } + return 0 +} + +func (m *Summary) GetQuantile() []Quantile { + if m != nil { + return m.Quantile + } + return nil +} + +type Untyped struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Untyped) Reset() { *m = Untyped{} } +func (m *Untyped) String() string { return proto.CompactTextString(m) } +func (*Untyped) ProtoMessage() {} +func (*Untyped) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{5} +} +func (m *Untyped) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Untyped) XXX_Merge(src proto.Message) { + xxx_messageInfo_Untyped.Merge(m, src) +} +func (m *Untyped) XXX_Size() int { + return m.Size() +} +func (m *Untyped) XXX_DiscardUnknown() { + xxx_messageInfo_Untyped.DiscardUnknown(m) +} + +var xxx_messageInfo_Untyped proto.InternalMessageInfo + +func (m *Untyped) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +type Histogram struct { + SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"` + SampleCountFloat float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat,proto3" json:"sample_count_float,omitempty"` + SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"` + // Buckets for the conventional histogram. + Bucket []Bucket `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket"` + // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and + // then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). + // In the future, more bucket schemas may be added using numbers < -4 or > 8. + Schema int32 `protobuf:"zigzag32,5,opt,name=schema,proto3" json:"schema,omitempty"` + ZeroThreshold float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` + ZeroCount uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount,proto3" json:"zero_count,omitempty"` + ZeroCountFloat float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat,proto3" json:"zero_count_float,omitempty"` + // Negative buckets for the native histogram. 
+ NegativeSpan []BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan,proto3" json:"negative_span"` + // Use either "negative_delta" or "negative_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. + NegativeDelta []int64 `protobuf:"zigzag64,10,rep,packed,name=negative_delta,json=negativeDelta,proto3" json:"negative_delta,omitempty"` + NegativeCount []float64 `protobuf:"fixed64,11,rep,packed,name=negative_count,json=negativeCount,proto3" json:"negative_count,omitempty"` + // Positive buckets for the native histogram. + PositiveSpan []BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan,proto3" json:"positive_span"` + // Use either "positive_delta" or "positive_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. + PositiveDelta []int64 `protobuf:"zigzag64,13,rep,packed,name=positive_delta,json=positiveDelta,proto3" json:"positive_delta,omitempty"` + PositiveCount []float64 `protobuf:"fixed64,14,rep,packed,name=positive_count,json=positiveCount,proto3" json:"positive_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{6} +} +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) +} +func (m *Histogram) XXX_Size() int { + return m.Size() +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +func (m *Histogram) GetSampleCount() uint64 { + if m != nil { + return m.SampleCount + } + return 0 +} + +func (m *Histogram) GetSampleCountFloat() float64 { + if m != nil { + return m.SampleCountFloat + } + return 0 +} + +func (m *Histogram) GetSampleSum() float64 { + if m != nil { + return m.SampleSum + } + return 0 +} + +func (m *Histogram) GetBucket() []Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *Histogram) GetSchema() int32 { + if m != nil { + return m.Schema + } + return 0 +} + +func (m *Histogram) GetZeroThreshold() float64 { + if m != nil { + return m.ZeroThreshold + } + return 0 +} + +func (m *Histogram) GetZeroCount() uint64 { + if m != nil { + return m.ZeroCount + } + return 0 +} + +func (m *Histogram) GetZeroCountFloat() float64 { + if m != nil { + return m.ZeroCountFloat + } + return 0 +} + +func (m *Histogram) GetNegativeSpan() []BucketSpan { + if m != nil { + return m.NegativeSpan + } + return nil +} + +func (m *Histogram) GetNegativeDelta() []int64 { + if m != nil { + return m.NegativeDelta + } + return nil +} + +func (m *Histogram) GetNegativeCount() []float64 { + if m != nil { + return m.NegativeCount + } + return nil +} + +func (m *Histogram) GetPositiveSpan() []BucketSpan { + if m != nil { + return m.PositiveSpan + } + return nil +} + +func (m *Histogram) 
GetPositiveDelta() []int64 { + if m != nil { + return m.PositiveDelta + } + return nil +} + +func (m *Histogram) GetPositiveCount() []float64 { + if m != nil { + return m.PositiveCount + } + return nil +} + +type Bucket struct { + CumulativeCount uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount,proto3" json:"cumulative_count,omitempty"` + CumulativeCountFloat float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat,proto3" json:"cumulative_count_float,omitempty"` + UpperBound float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound,proto3" json:"upper_bound,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar,proto3" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} +func (*Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{7} +} +func (m *Bucket) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_Bucket.Merge(m, src) +} +func (m *Bucket) XXX_Size() int { + return m.Size() +} +func (m *Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_Bucket proto.InternalMessageInfo + +func (m *Bucket) GetCumulativeCount() uint64 { + if m != nil { + return m.CumulativeCount + } + return 0 +} + +func (m *Bucket) GetCumulativeCountFloat() float64 { + if m != nil { + return m.CumulativeCountFloat + } + return 0 +} + +func (m *Bucket) GetUpperBound() float64 { + if m != nil { + return m.UpperBound + } + return 0 +} + +func (m *Bucket) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +// A BucketSpan defines a number of consecutive buckets in a native +// histogram with their offset. Logically, it would be more +// straightforward to include the bucket counts in the Span. However, +// the protobuf representation is more compact in the way the data is +// structured here (with all the buckets in a single array separate +// from the Spans). 
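
Editorial note: as the comment above describes, native-histogram buckets are stored as spans plus one flat array of counts (or deltas) rather than counts embedded in each span. A small illustrative decoder (a hypothetical helper, not part of the generated file) shows how spans and integer deltas expand back into absolute bucket indexes and counts:

```go
// decodeBuckets expands BucketSpans plus delta-encoded counts into
// absolute (bucket index -> count) pairs. Illustration only: each
// span's Offset is taken relative to the end of the previous span,
// and each delta is relative to the previous bucket's count (so the
// first delta is the absolute count of the first bucket).
func decodeBuckets(spans []BucketSpan, deltas []int64) map[int32]int64 {
	out := make(map[int32]int64, len(deltas))
	var idx int32   // absolute bucket index
	var count int64 // running bucket count
	d := 0
	for _, s := range spans {
		idx += s.Offset
		for n := uint32(0); n < s.Length; n++ {
			count += deltas[d]
			out[idx] = count
			idx++
			d++
		}
	}
	return out
}
```
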
+type BucketSpan struct { + Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` + Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketSpan) Reset() { *m = BucketSpan{} } +func (m *BucketSpan) String() string { return proto.CompactTextString(m) } +func (*BucketSpan) ProtoMessage() {} +func (*BucketSpan) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{8} +} +func (m *BucketSpan) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BucketSpan) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketSpan.Merge(m, src) +} +func (m *BucketSpan) XXX_Size() int { + return m.Size() +} +func (m *BucketSpan) XXX_DiscardUnknown() { + xxx_messageInfo_BucketSpan.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketSpan proto.InternalMessageInfo + +func (m *BucketSpan) GetOffset() int32 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *BucketSpan) GetLength() uint32 { + if m != nil { + return m.Length + } + return 0 +} + +type Exemplar struct { + Label []LabelPair `protobuf:"bytes,1,rep,name=label,proto3" json:"label"` + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + Timestamp *types.Timestamp `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Exemplar) Reset() { *m = Exemplar{} } +func (m *Exemplar) String() string { return proto.CompactTextString(m) } +func (*Exemplar) ProtoMessage() {} +func (*Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{9} +} +func (m *Exemplar) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_Exemplar.Merge(m, src) +} +func (m *Exemplar) XXX_Size() int { + return m.Size() +} +func (m *Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_Exemplar proto.InternalMessageInfo + +func (m *Exemplar) GetLabel() []LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Exemplar) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Exemplar) GetTimestamp() *types.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +type Metric struct { + Label []LabelPair `protobuf:"bytes,1,rep,name=label,proto3" json:"label"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge,proto3" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter,proto3" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary,proto3" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped,proto3" 
json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram,proto3" json:"histogram,omitempty"` + TimestampMs int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{10} +} +func (m *Metric) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return m.Size() +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +func (m *Metric) GetLabel() []LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Metric) GetGauge() *Gauge { + if m != nil { + return m.Gauge + } + return nil +} + +func (m *Metric) GetCounter() *Counter { + if m != nil { + return m.Counter + } + return nil +} + +func (m *Metric) GetSummary() *Summary { + if m != nil { + return m.Summary + } + return nil +} + +func (m *Metric) GetUntyped() *Untyped { + if m != nil { + return m.Untyped + } + return nil +} + +func (m *Metric) GetHistogram() *Histogram { + if m != nil { + return m.Histogram + } + return nil +} + +func (m *Metric) GetTimestampMs() int64 { + if m != nil { + return m.TimestampMs + } + return 0 +} + +type MetricFamily struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Help string `protobuf:"bytes,2,opt,name=help,proto3" json:"help,omitempty"` + Type MetricType `protobuf:"varint,3,opt,name=type,proto3,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []Metric `protobuf:"bytes,4,rep,name=metric,proto3" json:"metric"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricFamily) Reset() { *m = MetricFamily{} } +func (m *MetricFamily) String() string { return proto.CompactTextString(m) } +func (*MetricFamily) ProtoMessage() {} +func (*MetricFamily) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{11} +} +func (m *MetricFamily) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MetricFamily) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricFamily.Merge(m, src) +} +func (m *MetricFamily) XXX_Size() int { + return m.Size() +} +func (m *MetricFamily) XXX_DiscardUnknown() { + xxx_messageInfo_MetricFamily.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricFamily proto.InternalMessageInfo + +func (m *MetricFamily) GetName() string { + if m != nil { + return m.Name + } + return "" 
+} + +func (m *MetricFamily) GetHelp() string { + if m != nil { + return m.Help + } + return "" +} + +func (m *MetricFamily) GetType() MetricType { + if m != nil { + return m.Type + } + return MetricType_COUNTER +} + +func (m *MetricFamily) GetMetric() []Metric { + if m != nil { + return m.Metric + } + return nil +} + +func init() { + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) + proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") + proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") + proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") + proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") + proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") + proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") + proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") + proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan") + proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") + proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") + proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") +} + +func init() { + proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258) +} + +var fileDescriptor_d1e5ddb18987a258 = []byte{ + // 923 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x8e, 0xdb, 0x44, + 0x18, 0xad, 0x1b, 0xe7, 0xc7, 0x5f, 0x36, 0xdb, 0x74, 0x88, 0x2a, 0x6b, 0x61, 0x37, 0xc1, 0x12, + 0xd2, 0x82, 0x50, 0x22, 0xa0, 0x08, 0x54, 0x40, 0x62, 0xb7, 0xdd, 0x6e, 0x51, 0x49, 0x5b, 0x26, + 0xc9, 0x45, 0xe1, 0xc2, 0x9a, 0x64, 0x67, 0x1d, 0x0b, 0xdb, 0x63, 0xec, 0x71, 0xc5, 0x72, 0xcf, + 0x25, 0xd7, 0xbc, 0x02, 0x4f, 0x82, 0x7a, 0xc9, 0x13, 0x20, 0xb4, 0xef, 0xc0, 0x3d, 0x9a, 0x3f, + 0x3b, 0x5b, 0x39, 0x85, 0x15, 0x77, 0x33, 0xc7, 0xe7, 0xfb, 0xe6, 0x9c, 0x99, 0xc9, 0x99, 0x80, + 0x17, 0xb2, 0x49, 0x9a, 0xb1, 0x98, 0xf2, 0x35, 0x2d, 0xf2, 0xc9, 0x2a, 0x0a, 0x69, 0xc2, 0x27, + 0x31, 0xe5, 0x59, 0xb8, 0xca, 0xc7, 0x69, 0xc6, 0x38, 0x43, 0x83, 0x90, 0x8d, 0x2b, 0xce, 0x58, + 0x71, 0xf6, 0x06, 0x01, 0x0b, 0x98, 0x24, 0x4c, 0xc4, 0x48, 0x71, 0xf7, 0x86, 0x01, 0x63, 0x41, + 0x44, 0x27, 0x72, 0xb6, 0x2c, 0xce, 0x27, 0x3c, 0x8c, 0x69, 0xce, 0x49, 0x9c, 0x2a, 0x82, 0xf7, + 0x31, 0x38, 0x5f, 0x93, 0x25, 0x8d, 0x9e, 0x91, 0x30, 0x43, 0x08, 0xec, 0x84, 0xc4, 0xd4, 0xb5, + 0x46, 0xd6, 0xa1, 0x83, 0xe5, 0x18, 0x0d, 0xa0, 0xf9, 0x82, 0x44, 0x05, 0x75, 0x6f, 0x4a, 0x50, + 0x4d, 0xbc, 0x7d, 0x68, 0x9e, 0x92, 0x22, 0xd8, 0xf8, 0x2c, 0x6a, 0x2c, 0xf3, 0xf9, 0x3b, 0x68, + 0xdf, 0x67, 0x45, 0xc2, 0x69, 0x56, 0x4f, 0x40, 0xf7, 0xa0, 0x43, 0x7f, 0xa4, 0x71, 0x1a, 0x91, + 0x4c, 0x36, 0xee, 0x7e, 0x78, 0x30, 0xae, 0xb3, 0x35, 0x3e, 0xd1, 0x2c, 0x5c, 0xf2, 0xbd, 0xcf, + 0xa1, 0xf3, 0x4d, 0x41, 0x12, 0x1e, 0x46, 0x14, 0xed, 0x41, 0xe7, 0x07, 0x3d, 0xd6, 0x0b, 0x94, + 0xf3, 0xab, 0xca, 0x4b, 0x69, 0xbf, 0x58, 0xd0, 0x9e, 0x15, 0x71, 0x4c, 0xb2, 0x0b, 0xf4, 0x36, + 0xec, 0xe4, 0x24, 0x4e, 0x23, 0xea, 0xaf, 0x84, 0x5a, 0xd9, 0xc1, 0xc6, 0x5d, 0x85, 0x49, 0x03, + 0x68, 0x1f, 0x40, 0x53, 0xf2, 0x22, 0xd6, 0x9d, 0x1c, 0x85, 0xcc, 0x8a, 0x18, 0x7d, 0xb9, 0xb1, + 0x7e, 0x63, 0xd4, 0xd8, 0xee, 0xc3, 0x28, 0x3e, 0xb6, 0x5f, 0xfe, 0x39, 0xbc, 0x51, 0xa9, 0xf4, + 0x86, 0xd0, 0x5e, 0x24, 0xfc, 0x22, 0xa5, 0x67, 0x5b, 0xf6, 0xf2, 0x6f, 0x1b, 0x9c, 0x47, 
0x61, + 0xce, 0x59, 0x90, 0x91, 0xf8, 0xbf, 0x48, 0x7e, 0x1f, 0xd0, 0x26, 0xc5, 0x3f, 0x8f, 0x18, 0xe1, + 0xae, 0x2d, 0x7b, 0xf6, 0x37, 0x88, 0x0f, 0x05, 0xfe, 0x6f, 0x06, 0xef, 0x41, 0x6b, 0x59, 0xac, + 0xbe, 0xa7, 0x5c, 0xdb, 0x7b, 0xab, 0xde, 0xde, 0xb1, 0xe4, 0x68, 0x73, 0xba, 0x02, 0xdd, 0x81, + 0x56, 0xbe, 0x5a, 0xd3, 0x98, 0xb8, 0xcd, 0x91, 0x75, 0x78, 0x1b, 0xeb, 0x19, 0x7a, 0x07, 0x76, + 0x7f, 0xa2, 0x19, 0xf3, 0xf9, 0x3a, 0xa3, 0xf9, 0x9a, 0x45, 0x67, 0x6e, 0x4b, 0x2e, 0xdb, 0x13, + 0xe8, 0xdc, 0x80, 0x42, 0x99, 0xa4, 0x29, 0xa3, 0x6d, 0x69, 0xd4, 0x11, 0x88, 0xb2, 0x79, 0x08, + 0xfd, 0xea, 0xb3, 0x36, 0xd9, 0x91, 0x7d, 0x76, 0x4b, 0x92, 0xb2, 0xf8, 0x18, 0x7a, 0x09, 0x0d, + 0x08, 0x0f, 0x5f, 0x50, 0x3f, 0x4f, 0x49, 0xe2, 0x3a, 0xd2, 0xca, 0xe8, 0x75, 0x56, 0x66, 0x29, + 0x49, 0xb4, 0x9d, 0x1d, 0x53, 0x2c, 0x30, 0x21, 0xbe, 0x6c, 0x76, 0x46, 0x23, 0x4e, 0x5c, 0x18, + 0x35, 0x0e, 0x11, 0x2e, 0x97, 0x78, 0x20, 0xc0, 0x2b, 0x34, 0x65, 0xa0, 0x3b, 0x6a, 0x08, 0x8f, + 0x06, 0x55, 0x26, 0x1e, 0x43, 0x2f, 0x65, 0x79, 0x58, 0x49, 0xdb, 0xb9, 0x9e, 0x34, 0x53, 0x6c, + 0xa4, 0x95, 0xcd, 0x94, 0xb4, 0x9e, 0x92, 0x66, 0xd0, 0x52, 0x5a, 0x49, 0x53, 0xd2, 0x76, 0x95, + 0x34, 0x83, 0x4a, 0x69, 0xde, 0xef, 0x16, 0xb4, 0xd4, 0x82, 0xe8, 0x5d, 0xe8, 0xaf, 0x8a, 0xb8, + 0x88, 0x36, 0xed, 0xa8, 0x8b, 0x77, 0xab, 0xc2, 0x95, 0xa1, 0xbb, 0x70, 0xe7, 0x55, 0xea, 0x95, + 0x0b, 0x38, 0x78, 0xa5, 0x40, 0x9d, 0xd0, 0x10, 0xba, 0x45, 0x9a, 0xd2, 0xcc, 0x5f, 0xb2, 0x22, + 0x39, 0xd3, 0xb7, 0x10, 0x24, 0x74, 0x2c, 0x90, 0x2b, 0x79, 0xd1, 0xb8, 0x76, 0x5e, 0x40, 0xb5, + 0x71, 0xe2, 0x52, 0xb2, 0xf3, 0xf3, 0x9c, 0x2a, 0x07, 0xb7, 0xb1, 0x9e, 0x09, 0x3c, 0xa2, 0x49, + 0xc0, 0xd7, 0x72, 0xf5, 0x1e, 0xd6, 0x33, 0xef, 0x57, 0x0b, 0x3a, 0xa6, 0x29, 0xfa, 0x0c, 0x9a, + 0x91, 0x48, 0x4b, 0xd7, 0x92, 0xc7, 0x34, 0xac, 0xd7, 0x50, 0x06, 0xaa, 0x3e, 0x25, 0x55, 0x53, + 0x9f, 0x47, 0xe8, 0x53, 0x70, 0xca, 0x4c, 0xd6, 0xd6, 0xf6, 0xc6, 0x2a, 0xb5, 0xc7, 0x26, 0xb5, + 0xc7, 0x73, 0xc3, 0xc0, 0x15, 0xd9, 0xfb, 0xb9, 0x01, 0xad, 0xa9, 0x7c, 0x19, 0xfe, 0x9f, 0xae, + 0x0f, 0xa0, 0x19, 0x88, 0x2c, 0xd7, 0x41, 0xfc, 0x66, 0x7d, 0xb1, 0x8c, 0x7b, 0xac, 0x98, 0xe8, + 0x13, 0x68, 0xaf, 0x54, 0xbe, 0x6b, 0xc9, 0xfb, 0xf5, 0x45, 0xfa, 0x11, 0xc0, 0x86, 0x2d, 0x0a, + 0x73, 0x15, 0xbe, 0xf2, 0x3e, 0x6c, 0x2d, 0xd4, 0x09, 0x8d, 0x0d, 0x5b, 0x14, 0x16, 0x2a, 0x26, + 0x65, 0x98, 0x6c, 0x2d, 0xd4, 0x59, 0x8a, 0x0d, 0x1b, 0x7d, 0x01, 0xce, 0xda, 0xa4, 0xa7, 0x0c, + 0x91, 0xad, 0xdb, 0x53, 0x86, 0x2c, 0xae, 0x2a, 0x44, 0xde, 0x96, 0x3b, 0xee, 0xc7, 0xb9, 0x4c, + 0xaa, 0x06, 0xee, 0x96, 0xd8, 0x34, 0xf7, 0x7e, 0xb3, 0x60, 0x47, 0x9d, 0xc3, 0x43, 0x12, 0x87, + 0xd1, 0x45, 0xed, 0x33, 0x8a, 0xc0, 0x5e, 0xd3, 0x28, 0xd5, 0xaf, 0xa8, 0x1c, 0xa3, 0xbb, 0x60, + 0x0b, 0x8d, 0x72, 0x0b, 0x77, 0xb7, 0xfd, 0xe6, 0x55, 0xe7, 0xf9, 0x45, 0x4a, 0xb1, 0x64, 0x8b, + 0x44, 0x56, 0xff, 0x07, 0x5c, 0xfb, 0x75, 0x89, 0xac, 0xea, 0x4c, 0x22, 0xab, 0x8a, 0xf7, 0x96, + 0x00, 0x55, 0x3f, 0xd4, 0x85, 0xf6, 0xfd, 0xa7, 0x8b, 0x27, 0xf3, 0x13, 0xdc, 0xbf, 0x81, 0x1c, + 0x68, 0x9e, 0x1e, 0x2d, 0x4e, 0x4f, 0xfa, 0x96, 0xc0, 0x67, 0x8b, 0xe9, 0xf4, 0x08, 0x3f, 0xef, + 0xdf, 0x14, 0x93, 0xc5, 0x93, 0xf9, 0xf3, 0x67, 0x27, 0x0f, 0xfa, 0x0d, 0xd4, 0x03, 0xe7, 0xd1, + 0x57, 0xb3, 0xf9, 0xd3, 0x53, 0x7c, 0x34, 0xed, 0xdb, 0xe8, 0x0d, 0xb8, 0x25, 0x6b, 0xfc, 0x0a, + 0x6c, 0x1e, 0x7b, 0x2f, 0x2f, 0x0f, 0xac, 0x3f, 0x2e, 0x0f, 0xac, 0xbf, 0x2e, 0x0f, 0xac, 0x6f, + 0x07, 0x21, 0xf3, 0x2b, 0x71, 0xbe, 0x12, 0xb7, 0x6c, 0xc9, 0x9b, 0xfd, 0xd1, 0x3f, 0x01, 0x00, + 0x00, 0xff, 0xff, 
0x52, 0x2d, 0xb5, 0x31, 0xef, 0x08, 0x00, 0x00, +} + +func (m *LabelPair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelPair) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Gauge) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Gauge) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Gauge) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Counter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Counter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Counter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Exemplar != nil { + { + size, err := m.Exemplar.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Quantile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Quantile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Quantile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if m.Quantile != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Quantile)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - 
i, nil +} + +func (m *Summary) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Summary) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Quantile) > 0 { + for iNdEx := len(m.Quantile) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Quantile[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.SampleSum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SampleSum)))) + i-- + dAtA[i] = 0x11 + } + if m.SampleCount != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.SampleCount)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Untyped) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Untyped) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Untyped) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Histogram) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Histogram) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.PositiveCount) > 0 { + for iNdEx := len(m.PositiveCount) - 1; iNdEx >= 0; iNdEx-- { + f2 := math.Float64bits(float64(m.PositiveCount[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f2)) + } + i = encodeVarintMetrics(dAtA, i, uint64(len(m.PositiveCount)*8)) + i-- + dAtA[i] = 0x72 + } + if len(m.PositiveDelta) > 0 { + var j3 int + dAtA5 := make([]byte, len(m.PositiveDelta)*10) + for _, num := range m.PositiveDelta { + x4 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x4 >= 1<<7 { + dAtA5[j3] = uint8(uint64(x4)&0x7f | 0x80) + j3++ + x4 >>= 7 + } + dAtA5[j3] = uint8(x4) + j3++ + } + i -= j3 + copy(dAtA[i:], dAtA5[:j3]) + i = encodeVarintMetrics(dAtA, i, uint64(j3)) + i-- + dAtA[i] = 0x6a + } + if len(m.PositiveSpan) > 0 { + for iNdEx := len(m.PositiveSpan) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PositiveSpan[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } + if len(m.NegativeCount) > 0 { + for 
iNdEx := len(m.NegativeCount) - 1; iNdEx >= 0; iNdEx-- { + f6 := math.Float64bits(float64(m.NegativeCount[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6)) + } + i = encodeVarintMetrics(dAtA, i, uint64(len(m.NegativeCount)*8)) + i-- + dAtA[i] = 0x5a + } + if len(m.NegativeDelta) > 0 { + var j7 int + dAtA9 := make([]byte, len(m.NegativeDelta)*10) + for _, num := range m.NegativeDelta { + x8 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x8 >= 1<<7 { + dAtA9[j7] = uint8(uint64(x8)&0x7f | 0x80) + j7++ + x8 >>= 7 + } + dAtA9[j7] = uint8(x8) + j7++ + } + i -= j7 + copy(dAtA[i:], dAtA9[:j7]) + i = encodeVarintMetrics(dAtA, i, uint64(j7)) + i-- + dAtA[i] = 0x52 + } + if len(m.NegativeSpan) > 0 { + for iNdEx := len(m.NegativeSpan) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NegativeSpan[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if m.ZeroCountFloat != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat)))) + i-- + dAtA[i] = 0x41 + } + if m.ZeroCount != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ZeroCount)) + i-- + dAtA[i] = 0x38 + } + if m.ZeroThreshold != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold)))) + i-- + dAtA[i] = 0x31 + } + if m.Schema != 0 { + i = encodeVarintMetrics(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31)))) + i-- + dAtA[i] = 0x28 + } + if m.SampleCountFloat != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SampleCountFloat)))) + i-- + dAtA[i] = 0x21 + } + if len(m.Bucket) > 0 { + for iNdEx := len(m.Bucket) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Bucket[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.SampleSum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SampleSum)))) + i-- + dAtA[i] = 0x11 + } + if m.SampleCount != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.SampleCount)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Bucket) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bucket) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Bucket) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.CumulativeCountFloat != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CumulativeCountFloat)))) + i-- + dAtA[i] = 0x21 + } + if m.Exemplar != nil { + { + size, err := m.Exemplar.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.UpperBound != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.UpperBound)))) + i-- + dAtA[i] = 0x11 + } + if m.CumulativeCount != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.CumulativeCount)) + i-- + 
dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BucketSpan) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Length != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x10 + } + if m.Offset != 0 { + i = encodeVarintMetrics(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31)))) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Exemplar) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Timestamp != nil { + { + size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if len(m.Label) > 0 { + for iNdEx := len(m.Label) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Label[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Metric) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metric) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Histogram != nil { + { + size, err := m.Histogram.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.TimestampMs != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TimestampMs)) + i-- + dAtA[i] = 0x30 + } + if m.Untyped != nil { + { + size, err := m.Untyped.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Summary != nil { + { + size, err := m.Summary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Counter != nil { + { + size, err := m.Counter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil 
{ + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Gauge != nil { + { + size, err := m.Gauge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Label) > 0 { + for iNdEx := len(m.Label) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Label[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *MetricFamily) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricFamily) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricFamily) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Metric) > 0 { + for iNdEx := len(m.Metric) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metric[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.Type != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x18 + } + if len(m.Help) > 0 { + i -= len(m.Help) + copy(dAtA[i:], m.Help) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Help))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { + offset -= sovMetrics(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *LabelPair) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Gauge) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Counter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.Exemplar != nil { + l = m.Exemplar.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Quantile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Quantile != 0 { + n += 9 + } + if m.Value != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Summary) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SampleCount != 0 { + n += 1 + sovMetrics(uint64(m.SampleCount)) + } + if m.SampleSum != 0 { + n += 9 + } + if len(m.Quantile) > 0 { + for _, e := range m.Quantile { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.XXX_unrecognized 
!= nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Untyped) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Histogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SampleCount != 0 { + n += 1 + sovMetrics(uint64(m.SampleCount)) + } + if m.SampleSum != 0 { + n += 9 + } + if len(m.Bucket) > 0 { + for _, e := range m.Bucket { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.SampleCountFloat != 0 { + n += 9 + } + if m.Schema != 0 { + n += 1 + sozMetrics(uint64(m.Schema)) + } + if m.ZeroThreshold != 0 { + n += 9 + } + if m.ZeroCount != 0 { + n += 1 + sovMetrics(uint64(m.ZeroCount)) + } + if m.ZeroCountFloat != 0 { + n += 9 + } + if len(m.NegativeSpan) > 0 { + for _, e := range m.NegativeSpan { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.NegativeDelta) > 0 { + l = 0 + for _, e := range m.NegativeDelta { + l += sozMetrics(uint64(e)) + } + n += 1 + sovMetrics(uint64(l)) + l + } + if len(m.NegativeCount) > 0 { + n += 1 + sovMetrics(uint64(len(m.NegativeCount)*8)) + len(m.NegativeCount)*8 + } + if len(m.PositiveSpan) > 0 { + for _, e := range m.PositiveSpan { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.PositiveDelta) > 0 { + l = 0 + for _, e := range m.PositiveDelta { + l += sozMetrics(uint64(e)) + } + n += 1 + sovMetrics(uint64(l)) + l + } + if len(m.PositiveCount) > 0 { + n += 1 + sovMetrics(uint64(len(m.PositiveCount)*8)) + len(m.PositiveCount)*8 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Bucket) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CumulativeCount != 0 { + n += 1 + sovMetrics(uint64(m.CumulativeCount)) + } + if m.UpperBound != 0 { + n += 9 + } + if m.Exemplar != nil { + l = m.Exemplar.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.CumulativeCountFloat != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BucketSpan) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + sozMetrics(uint64(m.Offset)) + } + if m.Length != 0 { + n += 1 + sovMetrics(uint64(m.Length)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Exemplar) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Label) > 0 { + for _, e := range m.Label { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.Value != 0 { + n += 9 + } + if m.Timestamp != nil { + l = m.Timestamp.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Metric) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Label) > 0 { + for _, e := range m.Label { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.Gauge != nil { + l = m.Gauge.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Counter != nil { + l = m.Counter.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Summary != nil { + l = m.Summary.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Untyped != nil { + l = m.Untyped.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.TimestampMs != 0 { + n += 1 + sovMetrics(uint64(m.TimestampMs)) + } + if m.Histogram != nil { + l = m.Histogram.Size() + n += 1 + l + 
sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MetricFamily) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + l = len(m.Help) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Type != 0 { + n += 1 + sovMetrics(uint64(m.Type)) + } + if len(m.Metric) > 0 { + for _, e := range m.Metric { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovMetrics(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMetrics(x uint64) (n int) { + return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *LabelPair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
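As orientation for the generated helpers above: MarshalToSizedBuffer fills dAtA from the back (so each nested message's varint length can be written immediately once its body is in place, without shifting bytes around), sovMetrics counts the bytes a value occupies as a varint, sozMetrics does the same after zigzag-encoding, and the literal tag bytes (0xa, 0x12, 0x18, 0x1a, 0x22, ...) are (field_number << 3) | wire_type. A minimal standalone Go sketch of that arithmetic, with ad-hoc helper names, not part of the generated file:

package main

import (
	"fmt"
	"math/bits"
)

// sov: byte count of x as a varint (7 payload bits per byte).
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// soz: zigzag-encode first, so small negative values stay small.
func soz(x int64) int { return sov(uint64((x << 1) ^ (x >> 63))) }

// tag: the byte written before each field.
func tag(fieldNum, wireType int) byte { return byte(fieldNum<<3 | wireType) }

func main() {
	fmt.Println(sov(300))          // 2: 300 needs two 7-bit groups
	fmt.Println(soz(-1))           // 1: zigzag(-1) == 1
	fmt.Printf("%#x\n", tag(1, 2)) // 0xa: field 1, length-delimited
	fmt.Printf("%#x\n", tag(3, 0)) // 0x18: field 3, varint
}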
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Gauge) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Gauge: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Gauge: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Counter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Counter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Counter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplar", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exemplar == nil { + m.Exemplar = &Exemplar{} + } + if err := m.Exemplar.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
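Every Unmarshal method in this file repeats the same inline loop to read a varint: seven payload bits per byte, least-significant group first, terminated by a byte with its high bit clear. A stripped-down sketch of that loop (the generated version additionally guards against 64-bit overflow and truncated input):

package main

import "fmt"

// decodeVarint returns the decoded value and how many bytes it consumed.
func decodeVarint(dAtA []byte) (v uint64, n int) {
	for shift := uint(0); ; shift += 7 {
		b := dAtA[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of this varint
			return v, n
		}
	}
}

func main() {
	v, n := decodeVarint([]byte{0xAC, 0x02}) // 300, encoded in two bytes
	fmt.Println(v, n)                        // 300 2
}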
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Quantile) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Quantile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Quantile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Quantile = float64(math.Float64frombits(v)) + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Summary) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Summary: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Summary: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleCount", wireType) + } + m.SampleCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SampleCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleSum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.SampleSum = float64(math.Float64frombits(v)) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Quantile = append(m.Quantile, Quantile{}) + if err := m.Quantile[len(m.Quantile)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Untyped) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Untyped: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Untyped: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
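The double-valued fields above (Gauge and Untyped values, Quantile, Summary's sample_sum) all take the fixed64 path: eight little-endian bytes reinterpreted as an IEEE 754 float64 via math.Float64frombits. A tiny sketch of the round trip:

package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

func main() {
	var buf [8]byte
	binary.LittleEndian.PutUint64(buf[:], math.Float64bits(0.5)) // encode
	fmt.Printf("% x\n", buf[:]) // 00 00 00 00 00 00 e0 3f
	v := math.Float64frombits(binary.LittleEndian.Uint64(buf[:])) // decode
	fmt.Println(v) // 0.5
}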
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Histogram) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Histogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleCount", wireType) + } + m.SampleCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SampleCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleSum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.SampleSum = float64(math.Float64frombits(v)) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bucket", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bucket = append(m.Bucket, Bucket{}) + if err := m.Bucket[len(m.Bucket)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.SampleCountFloat = float64(math.Float64frombits(v)) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Schema = v + case 6: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroThreshold = float64(math.Float64frombits(v)) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCount", wireType) + } + m.ZeroCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.ZeroCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroCountFloat = float64(math.Float64frombits(v)) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NegativeSpan = append(m.NegativeSpan, BucketSpan{}) + if err := m.NegativeSpan[len(m.NegativeSpan)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDelta = append(m.NegativeDelta, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.NegativeDelta) == 0 { + m.NegativeDelta = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDelta = append(m.NegativeDelta, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeDelta", wireType) + } + case 11: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCount = append(m.NegativeCount, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
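For packed double fields such as negative_count here (and positive_count below), the payload is a flat run of fixed64 values, so packedLen / 8 is the exact element count used to pre-size the slice. An illustrative sketch with an ad-hoc helper name:

package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

func unpackDoubles(payload []byte) []float64 {
	out := make([]float64, 0, len(payload)/8) // element count is known up front
	for i := 0; i+8 <= len(payload); i += 8 {
		out = append(out, math.Float64frombits(binary.LittleEndian.Uint64(payload[i:])))
	}
	return out
}

func main() {
	var buf [16]byte
	binary.LittleEndian.PutUint64(buf[0:], math.Float64bits(1.5))
	binary.LittleEndian.PutUint64(buf[8:], math.Float64bits(-3))
	fmt.Println(unpackDoubles(buf[:])) // [1.5 -3]
}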
var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.NegativeCount) == 0 { + m.NegativeCount = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCount = append(m.NegativeCount, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeCount", wireType) + } + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PositiveSpan = append(m.PositiveSpan, BucketSpan{}) + if err := m.PositiveSpan[len(m.PositiveSpan)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDelta = append(m.PositiveDelta, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PositiveDelta) == 0 { + m.PositiveDelta = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDelta = append(m.PositiveDelta, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveDelta", wireType) + } + case 14: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCount = append(m.PositiveCount, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen 
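The packed sint64 fields (negative_delta above, positive_delta below) are denser: the payload is a run of varints, each zigzag-decoded into a signed delta, and the element count is found by counting bytes with the high bit clear, exactly as the generated code does before pre-sizing the slice. A self-contained sketch (helper name is ad hoc):

package main

import "fmt"

func unpackSint64(payload []byte) []int64 {
	count := 0
	for _, b := range payload {
		if b < 0x80 { // each varint ends with a high-bit-clear byte
			count++
		}
	}
	out := make([]int64, 0, count)
	for i := 0; i < len(payload); {
		var v uint64
		for shift := uint(0); ; shift += 7 {
			b := payload[i]
			i++
			v |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) // zigzag decode
		out = append(out, int64(v))
	}
	return out
}

func main() {
	// zigzag maps 0,1,2,3,... to 0,-1,1,-2,...
	fmt.Println(unpackSint64([]byte{0x00, 0x01, 0x02, 0x03})) // [0 -1 1 -2]
}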
+ if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.PositiveCount) == 0 { + m.PositiveCount = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCount = append(m.PositiveCount, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveCount", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Bucket) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Bucket: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Bucket: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CumulativeCount", wireType) + } + m.CumulativeCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CumulativeCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field UpperBound", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.UpperBound = float64(math.Float64frombits(v)) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplar", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exemplar == nil { + m.Exemplar = &Exemplar{} + } + if err := m.Exemplar.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CumulativeCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.CumulativeCountFloat = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := 
skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BucketSpan) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Offset = v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Exemplar) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Exemplar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Label = append(m.Label, LabelPair{}) + if err := m.Label[len(m.Label)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Timestamp == nil { + m.Timestamp = &types.Timestamp{} + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
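Unknown fields land in the default branch seen here: skipMetrics (defined at the end of this file) works out how many bytes the field occupies from its wire type alone, and the raw bytes are kept verbatim in XXX_unrecognized so they survive a re-marshal. A compressed sketch of the per-wire-type skip logic (bounds checks and group nesting omitted; lengths simplified):

package main

import "fmt"

func skipField(b []byte, wireType int) int {
	switch wireType {
	case 0: // varint: scan to the terminating high-bit-clear byte
		n := 0
		for b[n] >= 0x80 {
			n++
		}
		return n + 1
	case 1: // fixed64
		return 8
	case 2: // length-delimited: length prefix plus payload
		return 1 + int(b[0]) // assumes a single-byte length, for brevity
	case 5: // fixed32
		return 4
	}
	return -1 // groups (3 and 4) are handled via depth tracking in the real code
}

func main() {
	fmt.Println(skipField([]byte{0xAC, 0x02}, 0))          // 2
	fmt.Println(skipField([]byte{0x03, 'a', 'b', 'c'}, 2)) // 4
}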
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metric) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Label = append(m.Label, LabelPair{}) + if err := m.Label[len(m.Label)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Gauge == nil { + m.Gauge = &Gauge{} + } + if err := m.Gauge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Counter == nil { + m.Counter = &Counter{} + } + if err := m.Counter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Summary == nil { + m.Summary = &Summary{} + } + if err := 
m.Summary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Untyped", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Untyped == nil { + m.Untyped = &Untyped{} + } + if err := m.Untyped.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType) + } + m.TimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Histogram == nil { + m.Histogram = &Histogram{} + } + if err := m.Histogram.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricFamily) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricFamily: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricFamily: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Help", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Help = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= MetricType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metric = append(m.Metric, Metric{}) + if err := m.Metric[len(m.Metric)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
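Taken together, the generated Marshal, Unmarshal, and Size methods give each message a symmetric binary API. A hedged round-trip sketch, assuming the import path implied by this diff's repository layout (prompb/io/prometheus/client, package io_prometheus_client); adjust the path if it is vendored differently:

package main

import (
	"fmt"

	dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
)

func main() {
	in := dto.MetricFamily{
		Name: "http_requests_total",
		Help: "Total HTTP requests.",
		Type: dto.MetricType_COUNTER,
		Metric: []dto.Metric{{
			Label:   []dto.LabelPair{{Name: "code", Value: "200"}},
			Counter: &dto.Counter{Value: 1027},
		}},
	}
	raw, err := in.Marshal() // Size, then MarshalToSizedBuffer, as above
	if err != nil {
		panic(err)
	}
	var out dto.MetricFamily
	if err := out.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(out.GetName(), out.Metric[0].Counter.GetValue()) // http_requests_total 1027
}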
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMetrics(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMetrics + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMetrics + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMetrics + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group") +) diff --git a/prompb/io/prometheus/client/metrics.proto b/prompb/io/prometheus/client/metrics.proto new file mode 100644 index 0000000000..6bbea622f2 --- /dev/null +++ b/prompb/io/prometheus/client/metrics.proto @@ -0,0 +1,147 @@ +// Copyright 2013 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This is copied and lightly edited from +// github.com/prometheus/client_model/io/prometheus/client/metrics.proto +// and finally converted to proto3 syntax to make it usable for the +// gogo-protobuf approach taken within prometheus/prometheus. + +syntax = "proto3"; + +package io.prometheus.client; +option go_package = "io_prometheus_client"; + +import "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; + +message LabelPair { + string name = 1; + string value = 2; +} + +enum MetricType { + // COUNTER must use the Metric field "counter". + COUNTER = 0; + // GAUGE must use the Metric field "gauge". + GAUGE = 1; + // SUMMARY must use the Metric field "summary". + SUMMARY = 2; + // UNTYPED must use the Metric field "untyped". + UNTYPED = 3; + // HISTOGRAM must use the Metric field "histogram". + HISTOGRAM = 4; + // GAUGE_HISTOGRAM must use the Metric field "histogram". 
+ GAUGE_HISTOGRAM = 5; +} + +message Gauge { + double value = 1; +} + +message Counter { + double value = 1; + Exemplar exemplar = 2; +} + +message Quantile { + double quantile = 1; + double value = 2; +} + +message Summary { + uint64 sample_count = 1; + double sample_sum = 2; + repeated Quantile quantile = 3 [(gogoproto.nullable) = false]; +} + +message Untyped { + double value = 1; +} + +message Histogram { + uint64 sample_count = 1; + double sample_count_float = 4; // Overrides sample_count if > 0. + double sample_sum = 2; + // Buckets for the conventional histogram. + repeated Bucket bucket = 3 [(gogoproto.nullable) = false]; // Ordered in increasing order of upper_bound, +Inf bucket is optional. + + // Everything below here is for native histograms (also known as sparse histograms). + // Native histograms are an experimental feature without stability guarantees. + + // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and + // then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). + // In the future, more bucket schemas may be added using numbers < -4 or > 8. + sint32 schema = 5; + double zero_threshold = 6; // Breadth of the zero bucket. + uint64 zero_count = 7; // Count in zero bucket. + double zero_count_float = 8; // Overrides zero_count if > 0. + + // Negative buckets for the native histogram. + repeated BucketSpan negative_span = 9 [(gogoproto.nullable) = false]; + // Use either "negative_delta" or "negative_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. + repeated sint64 negative_delta = 10; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double negative_count = 11; // Absolute count of each bucket. + + // Positive buckets for the native histogram. + repeated BucketSpan positive_span = 12 [(gogoproto.nullable) = false]; + // Use either "positive_delta" or "positive_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. + repeated sint64 positive_delta = 13; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double positive_count = 14; // Absolute count of each bucket. +} + +message Bucket { + uint64 cumulative_count = 1; // Cumulative in increasing order. + double cumulative_count_float = 4; // Overrides cumulative_count if > 0. + double upper_bound = 2; // Inclusive. + Exemplar exemplar = 3; +} + +// A BucketSpan defines a number of consecutive buckets in a native +// histogram with their offset. Logically, it would be more +// straightforward to include the bucket counts in the Span. However, +// the protobuf representation is more compact in the way the data is +// structured here (with all the buckets in a single array separate +// from the Spans). +message BucketSpan { + sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative). + uint32 length = 2; // Length of consecutive buckets. +} + +message Exemplar { + repeated LabelPair label = 1 [(gogoproto.nullable) = false]; + double value = 2; + google.protobuf.Timestamp timestamp = 3; // OpenMetrics-style.
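The schema comment in the Histogram message above fully determines the bucket boundaries: for schema n, consecutive boundaries differ by a factor of 2^(2^-n) and 1 is always a boundary, so a bucket at (illustrative) index i has upper bound (2^(2^-n))^i. A small sketch of that arithmetic:

package main

import (
	"fmt"
	"math"
)

// upperBound is an ad-hoc name; index here is the bucket's position
// relative to the boundary at 1.
func upperBound(schema int32, index int) float64 {
	base := math.Pow(2, math.Pow(2, -float64(schema))) // growth factor per bucket
	return math.Pow(base, float64(index))
}

func main() {
	// schema 0: factor 2 per bucket -> ..., 0.5, 1, 2, 4, ...
	fmt.Println(upperBound(0, -1), upperBound(0, 0), upperBound(0, 1), upperBound(0, 2))
	// schema 3: factor 2^(1/8), i.e. 8 buckets per power of two.
	fmt.Println(upperBound(3, 8)) // 2 (up to floating-point rounding)
}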
+} + +message Metric { + repeated LabelPair label = 1 [(gogoproto.nullable) = false]; + Gauge gauge = 2; + Counter counter = 3; + Summary summary = 4; + Untyped untyped = 5; + Histogram histogram = 7; + int64 timestamp_ms = 6; +} + +message MetricFamily { + string name = 1; + string help = 2; + MetricType type = 3; + repeated Metric metric = 4 [(gogoproto.nullable) = false]; +} diff --git a/prompb/remote.pb.go b/prompb/remote.pb.go index b3cf44884f..19318878d7 100644 --- a/prompb/remote.pb.go +++ b/prompb/remote.pb.go @@ -34,8 +34,10 @@ const ( // Content-Type: "application/x-protobuf" // Content-Encoding: "snappy" ReadRequest_SAMPLES ReadRequest_ResponseType = 0 - // Server will stream a delimited ChunkedReadResponse message that contains XOR encoded chunks for a single series. - // Each message is following varint size and fixed size bigendian uint32 for CRC32 Castagnoli checksum. + // Server will stream a delimited ChunkedReadResponse message that + // contains XOR or HISTOGRAM(!) encoded chunks for a single series. + // Each message is following varint size and fixed size bigendian + // uint32 for CRC32 Castagnoli checksum. // // Response headers: // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse" diff --git a/prompb/remote.proto b/prompb/remote.proto index 70c6dd3fbb..b4f82f5f9d 100644 --- a/prompb/remote.proto +++ b/prompb/remote.proto @@ -39,8 +39,10 @@ message ReadRequest { // Content-Type: "application/x-protobuf" // Content-Encoding: "snappy" SAMPLES = 0; - // Server will stream a delimited ChunkedReadResponse message that contains XOR encoded chunks for a single series. - // Each message is following varint size and fixed size bigendian uint32 for CRC32 Castagnoli checksum. + // Server will stream a delimited ChunkedReadResponse message that + // contains XOR or HISTOGRAM(!) encoded chunks for a single series. + // Each message is following varint size and fixed size bigendian + // uint32 for CRC32 Castagnoli checksum. // // Response headers: // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse" diff --git a/prompb/types.pb.go b/prompb/types.pb.go index e91dd55c4d..125f868e97 100644 --- a/prompb/types.pb.go +++ b/prompb/types.pb.go @@ -68,6 +68,37 @@ func (MetricMetadata_MetricType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_d938547f84707355, []int{0, 0} } +type Histogram_ResetHint int32 + +const ( + Histogram_UNKNOWN Histogram_ResetHint = 0 + Histogram_YES Histogram_ResetHint = 1 + Histogram_NO Histogram_ResetHint = 2 + Histogram_GAUGE Histogram_ResetHint = 3 +) + +var Histogram_ResetHint_name = map[int32]string{ + 0: "UNKNOWN", + 1: "YES", + 2: "NO", + 3: "GAUGE", +} + +var Histogram_ResetHint_value = map[string]int32{ + "UNKNOWN": 0, + "YES": 1, + "NO": 2, + "GAUGE": 3, +} + +func (x Histogram_ResetHint) String() string { + return proto.EnumName(Histogram_ResetHint_name, int32(x)) +} + +func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{3, 0} +} + type LabelMatcher_Type int32 const ( @@ -96,25 +127,31 @@ func (x LabelMatcher_Type) String() string { } func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{6, 0} + return fileDescriptor_d938547f84707355, []int{8, 0} } // We require this to match chunkenc.Encoding. 
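The streamed ChunkedReadResponse comments above describe the frame layout only loosely. The sketch below assumes, without checking the reader implementation, that each frame is a uvarint payload length, then a fixed-size big-endian uint32 CRC32 of the payload using the Castagnoli polynomial, then the payload itself; writeFrame is a hypothetical helper, not Prometheus's own writer:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

func writeFrame(w *bytes.Buffer, msg []byte) {
	var lenBuf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(lenBuf[:], uint64(len(msg)))
	w.Write(lenBuf[:n])                                                // varint size
	binary.Write(w, binary.BigEndian, crc32.Checksum(msg, castagnoli)) // checksum
	w.Write(msg)                                                       // payload
}

func main() {
	var buf bytes.Buffer
	writeFrame(&buf, []byte("chunked read response"))
	fmt.Println(buf.Len()) // 26 = 1 (size) + 4 (crc) + 21 (payload)
}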
type Chunk_Encoding int32 const ( - Chunk_UNKNOWN Chunk_Encoding = 0 - Chunk_XOR Chunk_Encoding = 1 + Chunk_UNKNOWN Chunk_Encoding = 0 + Chunk_XOR Chunk_Encoding = 1 + Chunk_HISTOGRAM Chunk_Encoding = 2 + Chunk_FLOAT_HISTOGRAM Chunk_Encoding = 3 ) var Chunk_Encoding_name = map[int32]string{ 0: "UNKNOWN", 1: "XOR", + 2: "HISTOGRAM", + 3: "FLOAT_HISTOGRAM", } var Chunk_Encoding_value = map[string]int32{ - "UNKNOWN": 0, - "XOR": 1, + "UNKNOWN": 0, + "XOR": 1, + "HISTOGRAM": 2, + "FLOAT_HISTOGRAM": 3, } func (x Chunk_Encoding) String() string { @@ -122,7 +159,7 @@ func (x Chunk_Encoding) String() string { } func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{8, 0} + return fileDescriptor_d938547f84707355, []int{10, 0} } type MetricMetadata struct { @@ -321,23 +358,324 @@ func (m *Exemplar) GetTimestamp() int64 { return 0 } +// A native histogram, also known as a sparse histogram. +// Original design doc: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit +// The appendix of this design doc also explains the concept of float +// histograms. This Histogram message can represent both, the usual +// integer histogram as well as a float histogram. +type Histogram struct { + // Types that are valid to be assigned to Count: + // + // *Histogram_CountInt + // *Histogram_CountFloat + Count isHistogram_Count `protobuf_oneof:"count"` + Sum float64 `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"` + // The schema defines the bucket schema. Currently, valid numbers + // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1 + // is a bucket boundary in each case, and then each power of two is + // divided into 2^n logarithmic buckets. Or in other words, each + // bucket boundary is the previous boundary times 2^(2^-n). In the + // future, more bucket schemas may be added using numbers < -4 or > + // 8. + Schema int32 `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"` + ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` + // Types that are valid to be assigned to ZeroCount: + // + // *Histogram_ZeroCountInt + // *Histogram_ZeroCountFloat + ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"` + // Negative Buckets. + NegativeSpans []BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans"` + // Use either "negative_deltas" or "negative_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"` + NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"` + // Positive Buckets. + PositiveSpans []BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans"` + // Use either "positive_deltas" or "positive_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. 
+ PositiveDeltas []int64 `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"` + PositiveCounts []float64 `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"` + ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=prometheus.Histogram_ResetHint" json:"reset_hint,omitempty"` + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. + Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{3} +} +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) +} +func (m *Histogram) XXX_Size() int { + return m.Size() +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +type isHistogram_Count interface { + isHistogram_Count() + MarshalTo([]byte) (int, error) + Size() int +} +type isHistogram_ZeroCount interface { + isHistogram_ZeroCount() + MarshalTo([]byte) (int, error) + Size() int +} + +type Histogram_CountInt struct { + CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof" json:"count_int,omitempty"` +} +type Histogram_CountFloat struct { + CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof" json:"count_float,omitempty"` +} +type Histogram_ZeroCountInt struct { + ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof" json:"zero_count_int,omitempty"` +} +type Histogram_ZeroCountFloat struct { + ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof" json:"zero_count_float,omitempty"` +} + +func (*Histogram_CountInt) isHistogram_Count() {} +func (*Histogram_CountFloat) isHistogram_Count() {} +func (*Histogram_ZeroCountInt) isHistogram_ZeroCount() {} +func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {} + +func (m *Histogram) GetCount() isHistogram_Count { + if m != nil { + return m.Count + } + return nil +} +func (m *Histogram) GetZeroCount() isHistogram_ZeroCount { + if m != nil { + return m.ZeroCount + } + return nil +} + +func (m *Histogram) GetCountInt() uint64 { + if x, ok := m.GetCount().(*Histogram_CountInt); ok { + return x.CountInt + } + return 0 +} + +func (m *Histogram) GetCountFloat() float64 { + if x, ok := m.GetCount().(*Histogram_CountFloat); ok { + return x.CountFloat + } + return 0 +} + +func (m *Histogram) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *Histogram) GetSchema() int32 { + if m != nil { + return m.Schema + } + return 0 +} + 
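The NegativeSpans/NegativeDeltas and PositiveSpans/PositiveDeltas pairs above encode sparse buckets compactly: each BucketSpan contributes length consecutive bucket indexes starting offset after where the previous span ended, and each delta adjusts a running count. Keeping the counts in flat arrays separate from the spans is what makes the wire form compact. An illustrative expansion with stand-in types, not the generated ones:

package main

import "fmt"

type span struct {
	offset int32 // gap to the previous span (or absolute start, for the first)
	length uint32
}

// expand maps bucket index -> absolute count.
func expand(spans []span, deltas []int64) map[int32]int64 {
	counts := map[int32]int64{}
	idx := int32(0) // current bucket index
	cur := int64(0) // running count; deltas are relative to the previous bucket
	d := 0
	for _, s := range spans {
		idx += s.offset
		for i := uint32(0); i < s.length; i++ {
			cur += deltas[d]
			d++
			counts[idx] = cur
			idx++
		}
	}
	return counts
}

func main() {
	// Buckets 0,1, then a gap of two, then buckets 4,5.
	spans := []span{{offset: 0, length: 2}, {offset: 2, length: 2}}
	deltas := []int64{3, -1, 0, 3}     // absolute counts 3, 2, 2, 5
	fmt.Println(expand(spans, deltas)) // map[0:3 1:2 4:2 5:5]
}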
+func (m *Histogram) GetZeroThreshold() float64 { + if m != nil { + return m.ZeroThreshold + } + return 0 +} + +func (m *Histogram) GetZeroCountInt() uint64 { + if x, ok := m.GetZeroCount().(*Histogram_ZeroCountInt); ok { + return x.ZeroCountInt + } + return 0 +} + +func (m *Histogram) GetZeroCountFloat() float64 { + if x, ok := m.GetZeroCount().(*Histogram_ZeroCountFloat); ok { + return x.ZeroCountFloat + } + return 0 +} + +func (m *Histogram) GetNegativeSpans() []BucketSpan { + if m != nil { + return m.NegativeSpans + } + return nil +} + +func (m *Histogram) GetNegativeDeltas() []int64 { + if m != nil { + return m.NegativeDeltas + } + return nil +} + +func (m *Histogram) GetNegativeCounts() []float64 { + if m != nil { + return m.NegativeCounts + } + return nil +} + +func (m *Histogram) GetPositiveSpans() []BucketSpan { + if m != nil { + return m.PositiveSpans + } + return nil +} + +func (m *Histogram) GetPositiveDeltas() []int64 { + if m != nil { + return m.PositiveDeltas + } + return nil +} + +func (m *Histogram) GetPositiveCounts() []float64 { + if m != nil { + return m.PositiveCounts + } + return nil +} + +func (m *Histogram) GetResetHint() Histogram_ResetHint { + if m != nil { + return m.ResetHint + } + return Histogram_UNKNOWN +} + +func (m *Histogram) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Histogram) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Histogram_CountInt)(nil), + (*Histogram_CountFloat)(nil), + (*Histogram_ZeroCountInt)(nil), + (*Histogram_ZeroCountFloat)(nil), + } +} + +// A BucketSpan defines a number of consecutive buckets with their +// offset. Logically, it would be more straightforward to include the +// bucket counts in the Span. However, the protobuf representation is +// more compact in the way the data is structured here (with all the +// buckets in a single array separate from the Spans). +type BucketSpan struct { + Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` + Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketSpan) Reset() { *m = BucketSpan{} } +func (m *BucketSpan) String() string { return proto.CompactTextString(m) } +func (*BucketSpan) ProtoMessage() {} +func (*BucketSpan) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{4} +} +func (m *BucketSpan) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BucketSpan) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketSpan.Merge(m, src) +} +func (m *BucketSpan) XXX_Size() int { + return m.Size() +} +func (m *BucketSpan) XXX_DiscardUnknown() { + xxx_messageInfo_BucketSpan.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketSpan proto.InternalMessageInfo + +func (m *BucketSpan) GetOffset() int32 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *BucketSpan) GetLength() uint32 { + if m != nil { + return m.Length + } + return 0 +} + // TimeSeries represents samples and labels for a single time series. 
type TimeSeries struct { // For a timeseries to be valid, and for the samples and exemplars // to be ingested by the remote system properly, the labels field is required. - Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` - Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` - Exemplars []Exemplar `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` + Exemplars []Exemplar `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars"` + Histograms []Histogram `protobuf:"bytes,4,rep,name=histograms,proto3" json:"histograms"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *TimeSeries) Reset() { *m = TimeSeries{} } func (m *TimeSeries) String() string { return proto.CompactTextString(m) } func (*TimeSeries) ProtoMessage() {} func (*TimeSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{3} + return fileDescriptor_d938547f84707355, []int{5} } func (m *TimeSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -387,6 +725,13 @@ func (m *TimeSeries) GetExemplars() []Exemplar { return nil } +func (m *TimeSeries) GetHistograms() []Histogram { + if m != nil { + return m.Histograms + } + return nil +} + type Label struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` @@ -399,7 +744,7 @@ func (m *Label) Reset() { *m = Label{} } func (m *Label) String() string { return proto.CompactTextString(m) } func (*Label) ProtoMessage() {} func (*Label) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{4} + return fileDescriptor_d938547f84707355, []int{6} } func (m *Label) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -453,7 +798,7 @@ func (m *Labels) Reset() { *m = Labels{} } func (m *Labels) String() string { return proto.CompactTextString(m) } func (*Labels) ProtoMessage() {} func (*Labels) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{5} + return fileDescriptor_d938547f84707355, []int{7} } func (m *Labels) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -503,7 +848,7 @@ func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } func (m *LabelMatcher) String() string { return proto.CompactTextString(m) } func (*LabelMatcher) ProtoMessage() {} func (*LabelMatcher) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{6} + return fileDescriptor_d938547f84707355, []int{8} } func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -570,7 +915,7 @@ func (m *ReadHints) Reset() { *m = ReadHints{} } func (m *ReadHints) String() string { return proto.CompactTextString(m) } func (*ReadHints) ProtoMessage() {} func (*ReadHints) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{7} + return fileDescriptor_d938547f84707355, []int{9} } func (m *ReadHints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -664,7 +1009,7 @@ func (m *Chunk) Reset() { *m = Chunk{} } func (m *Chunk) String() string { return proto.CompactTextString(m) } func (*Chunk) ProtoMessage() {} func (*Chunk) Descriptor() ([]byte, []int) { - return 
fileDescriptor_d938547f84707355, []int{8} + return fileDescriptor_d938547f84707355, []int{10} } func (m *Chunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -736,7 +1081,7 @@ func (m *ChunkedSeries) Reset() { *m = ChunkedSeries{} } func (m *ChunkedSeries) String() string { return proto.CompactTextString(m) } func (*ChunkedSeries) ProtoMessage() {} func (*ChunkedSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{9} + return fileDescriptor_d938547f84707355, []int{11} } func (m *ChunkedSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -781,11 +1126,14 @@ func (m *ChunkedSeries) GetChunks() []Chunk { func init() { proto.RegisterEnum("prometheus.MetricMetadata_MetricType", MetricMetadata_MetricType_name, MetricMetadata_MetricType_value) + proto.RegisterEnum("prometheus.Histogram_ResetHint", Histogram_ResetHint_name, Histogram_ResetHint_value) proto.RegisterEnum("prometheus.LabelMatcher_Type", LabelMatcher_Type_name, LabelMatcher_Type_value) proto.RegisterEnum("prometheus.Chunk_Encoding", Chunk_Encoding_name, Chunk_Encoding_value) proto.RegisterType((*MetricMetadata)(nil), "prometheus.MetricMetadata") proto.RegisterType((*Sample)(nil), "prometheus.Sample") proto.RegisterType((*Exemplar)(nil), "prometheus.Exemplar") + proto.RegisterType((*Histogram)(nil), "prometheus.Histogram") + proto.RegisterType((*BucketSpan)(nil), "prometheus.BucketSpan") proto.RegisterType((*TimeSeries)(nil), "prometheus.TimeSeries") proto.RegisterType((*Label)(nil), "prometheus.Label") proto.RegisterType((*Labels)(nil), "prometheus.Labels") @@ -798,53 +1146,76 @@ func init() { func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } var fileDescriptor_d938547f84707355 = []byte{ - // 734 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xcb, 0x6e, 0xdb, 0x46, - 0x14, 0xf5, 0xf0, 0x29, 0x5e, 0xd9, 0x02, 0x3d, 0x50, 0x51, 0xd6, 0x68, 0x55, 0x81, 0x40, 0x01, - 0x2d, 0x0a, 0x19, 0x76, 0x37, 0x35, 0xd0, 0x8d, 0x6c, 0xd0, 0x0f, 0xd4, 0x94, 0xe0, 0x91, 0x84, - 0x3e, 0x36, 0xc2, 0x48, 0x1a, 0x4b, 0x44, 0xc4, 0x47, 0x38, 0x54, 0x60, 0x7d, 0x48, 0x76, 0xf9, - 0x83, 0x20, 0x8b, 0xfc, 0x85, 0x97, 0xf9, 0x82, 0x20, 0xf0, 0x97, 0x04, 0x33, 0xa4, 0x4c, 0x29, - 0x4e, 0x16, 0xce, 0xee, 0xde, 0x7b, 0xce, 0xb9, 0x8f, 0xb9, 0x97, 0x84, 0x6a, 0xb6, 0x4a, 0x18, - 0x6f, 0x27, 0x69, 0x9c, 0xc5, 0x18, 0x92, 0x34, 0x0e, 0x59, 0x36, 0x67, 0x4b, 0x7e, 0x50, 0x9f, - 0xc5, 0xb3, 0x58, 0x86, 0x0f, 0x85, 0x95, 0x33, 0xdc, 0x37, 0x0a, 0xd4, 0x7c, 0x96, 0xa5, 0xc1, - 0xc4, 0x67, 0x19, 0x9d, 0xd2, 0x8c, 0xe2, 0x13, 0xd0, 0x44, 0x0e, 0x07, 0x35, 0x51, 0xab, 0x76, - 0xfc, 0x5b, 0xbb, 0xcc, 0xd1, 0xde, 0x66, 0x16, 0xee, 0x60, 0x95, 0x30, 0x22, 0x25, 0xf8, 0x77, - 0xc0, 0xa1, 0x8c, 0x8d, 0x6e, 0x69, 0x18, 0x2c, 0x56, 0xa3, 0x88, 0x86, 0xcc, 0x51, 0x9a, 0xa8, - 0x65, 0x11, 0x3b, 0x47, 0xce, 0x25, 0xd0, 0xa5, 0x21, 0xc3, 0x18, 0xb4, 0x39, 0x5b, 0x24, 0x8e, - 0x26, 0x71, 0x69, 0x8b, 0xd8, 0x32, 0x0a, 0x32, 0x47, 0xcf, 0x63, 0xc2, 0x76, 0x57, 0x00, 0x65, - 0x25, 0x5c, 0x05, 0x73, 0xd8, 0xfd, 0xbb, 0xdb, 0xfb, 0xa7, 0x6b, 0xef, 0x08, 0xe7, 0xac, 0x37, - 0xec, 0x0e, 0x3c, 0x62, 0x23, 0x6c, 0x81, 0x7e, 0xd1, 0x19, 0x5e, 0x78, 0xb6, 0x82, 0xf7, 0xc0, - 0xba, 0xbc, 0xea, 0x0f, 0x7a, 0x17, 0xa4, 0xe3, 0xdb, 0x2a, 0xc6, 0x50, 0x93, 0x48, 0x19, 0xd3, - 0x84, 0xb4, 0x3f, 0xf4, 0xfd, 0x0e, 0xf9, 0xcf, 0xd6, 0x71, 0x05, 0xb4, 0xab, 0xee, 0x79, 0xcf, - 0x36, 0xf0, 0x2e, 0x54, 0xfa, 0x83, 0xce, 0xc0, 0xeb, 0x7b, 0x03, 0xdb, 
0x74, 0xff, 0x02, 0xa3, - 0x4f, 0xc3, 0x64, 0xc1, 0x70, 0x1d, 0xf4, 0x57, 0x74, 0xb1, 0xcc, 0x9f, 0x05, 0x91, 0xdc, 0xc1, - 0x3f, 0x83, 0x95, 0x05, 0x21, 0xe3, 0x19, 0x0d, 0x13, 0x39, 0xa7, 0x4a, 0xca, 0x80, 0x1b, 0x43, - 0xc5, 0xbb, 0x63, 0x61, 0xb2, 0xa0, 0x29, 0x3e, 0x04, 0x63, 0x41, 0xc7, 0x6c, 0xc1, 0x1d, 0xd4, - 0x54, 0x5b, 0xd5, 0xe3, 0xfd, 0xcd, 0x77, 0xbd, 0x16, 0xc8, 0xa9, 0x76, 0xff, 0xf1, 0xd7, 0x1d, - 0x52, 0xd0, 0xca, 0x82, 0xca, 0x37, 0x0b, 0xaa, 0x5f, 0x16, 0x7c, 0x8b, 0x00, 0x06, 0x41, 0xc8, - 0xfa, 0x2c, 0x0d, 0x18, 0x7f, 0x7e, 0xcd, 0x63, 0x30, 0xb9, 0x1c, 0x97, 0x3b, 0x8a, 0x54, 0xe0, - 0x4d, 0x45, 0xfe, 0x12, 0x85, 0x64, 0x4d, 0xc4, 0x7f, 0x82, 0xc5, 0x8a, 0x21, 0xb9, 0xa3, 0x4a, - 0x55, 0x7d, 0x53, 0xb5, 0x7e, 0x81, 0x42, 0x57, 0x92, 0xdd, 0x23, 0xd0, 0x65, 0x13, 0x62, 0xe9, - 0xf2, 0x50, 0x50, 0xbe, 0x74, 0x61, 0x6f, 0x8f, 0x6f, 0x15, 0xe3, 0xbb, 0x27, 0x60, 0x5c, 0xe7, - 0xad, 0x3e, 0x77, 0x36, 0xf7, 0x35, 0x82, 0x5d, 0x19, 0xf7, 0x69, 0x36, 0x99, 0xb3, 0x14, 0x1f, - 0x6d, 0xdd, 0xf9, 0x2f, 0x4f, 0xf4, 0x05, 0xaf, 0xbd, 0x71, 0xdf, 0xeb, 0x46, 0x95, 0xaf, 0x35, - 0xaa, 0x6e, 0x36, 0xda, 0x02, 0x4d, 0x5e, 0xab, 0x01, 0x8a, 0x77, 0x63, 0xef, 0x60, 0x13, 0xd4, - 0xae, 0x77, 0x63, 0x23, 0x11, 0x20, 0xe2, 0x42, 0x45, 0x80, 0x78, 0xb6, 0xea, 0xbe, 0x47, 0x60, - 0x11, 0x46, 0xa7, 0x97, 0x41, 0x94, 0x71, 0xfc, 0x23, 0x98, 0x3c, 0x63, 0xc9, 0x28, 0xe4, 0xb2, - 0x2f, 0x95, 0x18, 0xc2, 0xf5, 0xb9, 0x28, 0x7d, 0xbb, 0x8c, 0x26, 0xeb, 0xd2, 0xc2, 0xc6, 0x3f, - 0x41, 0x85, 0x67, 0x34, 0xcd, 0x04, 0x3b, 0xbf, 0x05, 0x53, 0xfa, 0x3e, 0xc7, 0x3f, 0x80, 0xc1, - 0xa2, 0xa9, 0x00, 0x34, 0x09, 0xe8, 0x2c, 0x9a, 0xfa, 0x1c, 0x1f, 0x40, 0x65, 0x96, 0xc6, 0xcb, - 0x24, 0x88, 0x66, 0x8e, 0xde, 0x54, 0x5b, 0x16, 0x79, 0xf4, 0x71, 0x0d, 0x94, 0xf1, 0xca, 0x31, - 0x9a, 0xa8, 0x55, 0x21, 0xca, 0x78, 0x25, 0xb2, 0xa7, 0x34, 0x9a, 0x31, 0x91, 0xc4, 0xcc, 0xb3, - 0x4b, 0xdf, 0xe7, 0xee, 0x3b, 0x04, 0xfa, 0xd9, 0x7c, 0x19, 0xbd, 0xc0, 0x0d, 0xa8, 0x86, 0x41, - 0x34, 0x12, 0x27, 0x58, 0xf6, 0x6c, 0x85, 0x41, 0x24, 0xce, 0xd0, 0xe7, 0x12, 0xa7, 0x77, 0x8f, - 0x78, 0xf1, 0x89, 0x84, 0xf4, 0xae, 0xc0, 0xdb, 0xc5, 0x12, 0x54, 0xb9, 0x84, 0x83, 0xcd, 0x25, - 0xc8, 0x02, 0x6d, 0x2f, 0x9a, 0xc4, 0xd3, 0x20, 0x9a, 0x95, 0x1b, 0x10, 0xbf, 0x1e, 0x39, 0xd5, - 0x2e, 0x91, 0xb6, 0xdb, 0x84, 0xca, 0x9a, 0xb5, 0xfd, 0x77, 0x30, 0x41, 0xfd, 0xb7, 0x47, 0x6c, - 0xe4, 0xbe, 0x84, 0x3d, 0x99, 0x8d, 0x4d, 0xbf, 0xf7, 0xcb, 0x38, 0x04, 0x63, 0x22, 0x32, 0xac, - 0x3f, 0x8c, 0xfd, 0x27, 0x9d, 0xae, 0x05, 0x39, 0xed, 0xb4, 0x7e, 0xff, 0xd0, 0x40, 0x1f, 0x1e, - 0x1a, 0xe8, 0xd3, 0x43, 0x03, 0xfd, 0x6f, 0x08, 0x76, 0x32, 0x1e, 0x1b, 0xf2, 0xaf, 0xfb, 0xc7, - 0xe7, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa3, 0x6c, 0x23, 0xa6, 0x05, 0x00, 0x00, + // 1092 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdb, 0x6e, 0xdb, 0x46, + 0x13, 0x36, 0x49, 0x89, 0x12, 0x47, 0x87, 0xd0, 0xfb, 0x3b, 0xf9, 0x59, 0xa3, 0x71, 0x54, 0x02, + 0x69, 0x85, 0xa2, 0x90, 0x11, 0xb7, 0x17, 0x0d, 0x1a, 0x14, 0xb0, 0x1d, 0xf9, 0x80, 0x5a, 0x12, + 0xb2, 0x92, 0xd1, 0xa6, 0x37, 0xc2, 0x5a, 0x5a, 0x4b, 0x44, 0xc4, 0x43, 0xb9, 0xab, 0xc0, 0xea, + 0x7b, 0xf4, 0xae, 0x2f, 0xd1, 0xb7, 0x08, 0xd0, 0x9b, 0xf6, 0x05, 0x8a, 0xc2, 0x57, 0x7d, 0x8c, + 0x62, 0x87, 0xa4, 0x48, 0xc5, 0x29, 0xd0, 0xf4, 0x6e, 0xe7, 0x9b, 0x6f, 0x76, 0x3e, 0xee, 0xce, + 0xcc, 0x12, 0x6a, 0x72, 0x15, 0x71, 0xd1, 0x89, 0xe2, 0x50, 0x86, 0x04, 0xa2, 0x38, 0xf4, 0xb9, + 0x9c, 0xf3, 0xa5, 0xd8, 0xdd, 0x99, 0x85, 0xb3, 0x10, 0xe1, 
0x7d, 0xb5, 0x4a, 0x18, 0xee, 0xcf, + 0x3a, 0x34, 0x7b, 0x5c, 0xc6, 0xde, 0xa4, 0xc7, 0x25, 0x9b, 0x32, 0xc9, 0xc8, 0x53, 0x28, 0xa9, + 0x3d, 0x1c, 0xad, 0xa5, 0xb5, 0x9b, 0x07, 0x8f, 0x3b, 0xf9, 0x1e, 0x9d, 0x4d, 0x66, 0x6a, 0x8e, + 0x56, 0x11, 0xa7, 0x18, 0x42, 0x3e, 0x03, 0xe2, 0x23, 0x36, 0xbe, 0x66, 0xbe, 0xb7, 0x58, 0x8d, + 0x03, 0xe6, 0x73, 0x47, 0x6f, 0x69, 0x6d, 0x8b, 0xda, 0x89, 0xe7, 0x04, 0x1d, 0x7d, 0xe6, 0x73, + 0x42, 0xa0, 0x34, 0xe7, 0x8b, 0xc8, 0x29, 0xa1, 0x1f, 0xd7, 0x0a, 0x5b, 0x06, 0x9e, 0x74, 0xca, + 0x09, 0xa6, 0xd6, 0xee, 0x0a, 0x20, 0xcf, 0x44, 0x6a, 0x50, 0xb9, 0xec, 0x7f, 0xd3, 0x1f, 0x7c, + 0xdb, 0xb7, 0xb7, 0x94, 0x71, 0x3c, 0xb8, 0xec, 0x8f, 0xba, 0xd4, 0xd6, 0x88, 0x05, 0xe5, 0xd3, + 0xc3, 0xcb, 0xd3, 0xae, 0xad, 0x93, 0x06, 0x58, 0x67, 0xe7, 0xc3, 0xd1, 0xe0, 0x94, 0x1e, 0xf6, + 0x6c, 0x83, 0x10, 0x68, 0xa2, 0x27, 0xc7, 0x4a, 0x2a, 0x74, 0x78, 0xd9, 0xeb, 0x1d, 0xd2, 0x97, + 0x76, 0x99, 0x54, 0xa1, 0x74, 0xde, 0x3f, 0x19, 0xd8, 0x26, 0xa9, 0x43, 0x75, 0x38, 0x3a, 0x1c, + 0x75, 0x87, 0xdd, 0x91, 0x5d, 0x71, 0x9f, 0x81, 0x39, 0x64, 0x7e, 0xb4, 0xe0, 0x64, 0x07, 0xca, + 0xaf, 0xd9, 0x62, 0x99, 0x1c, 0x8b, 0x46, 0x13, 0x83, 0x7c, 0x08, 0x96, 0xf4, 0x7c, 0x2e, 0x24, + 0xf3, 0x23, 0xfc, 0x4e, 0x83, 0xe6, 0x80, 0x1b, 0x42, 0xb5, 0x7b, 0xc3, 0xfd, 0x68, 0xc1, 0x62, + 0xb2, 0x0f, 0xe6, 0x82, 0x5d, 0xf1, 0x85, 0x70, 0xb4, 0x96, 0xd1, 0xae, 0x1d, 0x6c, 0x17, 0xcf, + 0xf5, 0x42, 0x79, 0x8e, 0x4a, 0x6f, 0xfe, 0x78, 0xb4, 0x45, 0x53, 0x5a, 0x9e, 0x50, 0xff, 0xc7, + 0x84, 0xc6, 0xdb, 0x09, 0x7f, 0x2d, 0x83, 0x75, 0xe6, 0x09, 0x19, 0xce, 0x62, 0xe6, 0x93, 0x87, + 0x60, 0x4d, 0xc2, 0x65, 0x20, 0xc7, 0x5e, 0x20, 0x51, 0x76, 0xe9, 0x6c, 0x8b, 0x56, 0x11, 0x3a, + 0x0f, 0x24, 0xf9, 0x08, 0x6a, 0x89, 0xfb, 0x7a, 0x11, 0x32, 0x99, 0xa4, 0x39, 0xdb, 0xa2, 0x80, + 0xe0, 0x89, 0xc2, 0x88, 0x0d, 0x86, 0x58, 0xfa, 0x98, 0x47, 0xa3, 0x6a, 0x49, 0x1e, 0x80, 0x29, + 0x26, 0x73, 0xee, 0x33, 0xbc, 0xb5, 0x6d, 0x9a, 0x5a, 0xe4, 0x31, 0x34, 0x7f, 0xe4, 0x71, 0x38, + 0x96, 0xf3, 0x98, 0x8b, 0x79, 0xb8, 0x98, 0xe2, 0x0d, 0x6a, 0xb4, 0xa1, 0xd0, 0x51, 0x06, 0x92, + 0x8f, 0x53, 0x5a, 0xae, 0xcb, 0x44, 0x5d, 0x1a, 0xad, 0x2b, 0xfc, 0x38, 0xd3, 0xf6, 0x29, 0xd8, + 0x05, 0x5e, 0x22, 0xb0, 0x82, 0x02, 0x35, 0xda, 0x5c, 0x33, 0x13, 0x91, 0xc7, 0xd0, 0x0c, 0xf8, + 0x8c, 0x49, 0xef, 0x35, 0x1f, 0x8b, 0x88, 0x05, 0xc2, 0xa9, 0xe2, 0x09, 0x3f, 0x28, 0x9e, 0xf0, + 0xd1, 0x72, 0xf2, 0x8a, 0xcb, 0x61, 0xc4, 0x82, 0xf4, 0x98, 0x1b, 0x59, 0x8c, 0xc2, 0x04, 0xf9, + 0x04, 0xee, 0xad, 0x37, 0x99, 0xf2, 0x85, 0x64, 0xc2, 0xb1, 0x5a, 0x46, 0x9b, 0xd0, 0xf5, 0xde, + 0xcf, 0x11, 0xdd, 0x20, 0xa2, 0x3a, 0xe1, 0x40, 0xcb, 0x68, 0x6b, 0x39, 0x11, 0xa5, 0x09, 0x25, + 0x2b, 0x0a, 0x85, 0x57, 0x90, 0x55, 0xfb, 0x37, 0xb2, 0xb2, 0x98, 0xb5, 0xac, 0xf5, 0x26, 0xa9, + 0xac, 0x7a, 0x22, 0x2b, 0x83, 0x73, 0x59, 0x6b, 0x62, 0x2a, 0xab, 0x91, 0xc8, 0xca, 0xe0, 0x54, + 0xd6, 0xd7, 0x00, 0x31, 0x17, 0x5c, 0x8e, 0xe7, 0xea, 0xf4, 0x9b, 0xd8, 0xe3, 0x8f, 0x8a, 0x92, + 0xd6, 0xf5, 0xd3, 0xa1, 0x8a, 0x77, 0xe6, 0x05, 0x92, 0x5a, 0x71, 0xb6, 0xdc, 0x2c, 0xc0, 0x7b, + 0x6f, 0x17, 0xe0, 0x17, 0x60, 0xad, 0xa3, 0x36, 0x3b, 0xb5, 0x02, 0xc6, 0xcb, 0xee, 0xd0, 0xd6, + 0x88, 0x09, 0x7a, 0x7f, 0x60, 0xeb, 0x79, 0xb7, 0x1a, 0x47, 0x15, 0x28, 0xa3, 0xe6, 0xa3, 0x3a, + 0x40, 0x7e, 0xed, 0xee, 0x33, 0x80, 0xfc, 0x7c, 0x54, 0xe5, 0x85, 0xd7, 0xd7, 0x82, 0x27, 0xa5, + 0xbc, 0x4d, 0x53, 0x4b, 0xe1, 0x0b, 0x1e, 0xcc, 0xe4, 0x1c, 0x2b, 0xb8, 0x41, 0x53, 0xcb, 0xfd, + 0x4b, 0x03, 0x18, 0x79, 0x3e, 0x1f, 0xf2, 0xd8, 0xe3, 0xe2, 0xfd, 0xfb, 0xef, 0x00, 
0x2a, 0x02, + 0x5b, 0x5f, 0x38, 0x3a, 0x46, 0x90, 0x62, 0x44, 0x32, 0x15, 0xd2, 0x90, 0x8c, 0x48, 0xbe, 0x04, + 0x8b, 0xa7, 0x0d, 0x2f, 0x1c, 0x03, 0xa3, 0x76, 0x8a, 0x51, 0xd9, 0x34, 0x48, 0xe3, 0x72, 0x32, + 0xf9, 0x0a, 0x60, 0x9e, 0x1d, 0xbc, 0x70, 0x4a, 0x18, 0x7a, 0xff, 0x9d, 0xd7, 0x92, 0xc6, 0x16, + 0xe8, 0xee, 0x13, 0x28, 0xe3, 0x17, 0xa8, 0xe9, 0x89, 0x13, 0x57, 0x4b, 0xa6, 0xa7, 0x5a, 0x6f, + 0xce, 0x11, 0x2b, 0x9d, 0x23, 0xee, 0x53, 0x30, 0x2f, 0x92, 0xef, 0x7c, 0xdf, 0x83, 0x71, 0x7f, + 0xd2, 0xa0, 0x8e, 0x78, 0x8f, 0xc9, 0xc9, 0x9c, 0xc7, 0xe4, 0xc9, 0xc6, 0x83, 0xf1, 0xf0, 0x4e, + 0x7c, 0xca, 0xeb, 0x14, 0x1e, 0x8a, 0x4c, 0xa8, 0xfe, 0x2e, 0xa1, 0x46, 0x51, 0x68, 0x1b, 0x4a, + 0x38, 0xf6, 0x4d, 0xd0, 0xbb, 0x2f, 0x92, 0x3a, 0xea, 0x77, 0x5f, 0x24, 0x75, 0x44, 0xd5, 0xa8, + 0x57, 0x00, 0xed, 0xda, 0x86, 0xfb, 0x8b, 0xa6, 0x8a, 0x8f, 0x4d, 0x55, 0xed, 0x09, 0xf2, 0x7f, + 0xa8, 0x08, 0xc9, 0xa3, 0xb1, 0x2f, 0x50, 0x97, 0x41, 0x4d, 0x65, 0xf6, 0x84, 0x4a, 0x7d, 0xbd, + 0x0c, 0x26, 0x59, 0x6a, 0xb5, 0x26, 0x1f, 0x40, 0x55, 0x48, 0x16, 0x4b, 0xc5, 0x4e, 0x86, 0x6a, + 0x05, 0xed, 0x9e, 0x20, 0xf7, 0xc1, 0xe4, 0xc1, 0x74, 0x8c, 0x97, 0xa2, 0x1c, 0x65, 0x1e, 0x4c, + 0x7b, 0x82, 0xec, 0x42, 0x75, 0x16, 0x87, 0xcb, 0xc8, 0x0b, 0x66, 0x4e, 0xb9, 0x65, 0xb4, 0x2d, + 0xba, 0xb6, 0x49, 0x13, 0xf4, 0xab, 0x15, 0x0e, 0xb6, 0x2a, 0xd5, 0xaf, 0x56, 0x6a, 0xf7, 0x98, + 0x05, 0x33, 0xae, 0x36, 0xa9, 0x24, 0xbb, 0xa3, 0xdd, 0x13, 0xee, 0xef, 0x1a, 0x94, 0x8f, 0xe7, + 0xcb, 0xe0, 0x15, 0xd9, 0x83, 0x9a, 0xef, 0x05, 0x63, 0xd5, 0x4a, 0xb9, 0x66, 0xcb, 0xf7, 0x02, + 0x55, 0xc3, 0x3d, 0x81, 0x7e, 0x76, 0xb3, 0xf6, 0xa7, 0x6f, 0x8d, 0xcf, 0x6e, 0x52, 0x7f, 0x27, + 0xbd, 0x04, 0x03, 0x2f, 0x61, 0xb7, 0x78, 0x09, 0x98, 0xa0, 0xd3, 0x0d, 0x26, 0xe1, 0xd4, 0x0b, + 0x66, 0xf9, 0x0d, 0xa8, 0x37, 0x1c, 0xbf, 0xaa, 0x4e, 0x71, 0xed, 0x3e, 0x87, 0x6a, 0xc6, 0xba, + 0xd3, 0xbc, 0xdf, 0x0d, 0xd4, 0x13, 0xbb, 0xf1, 0xae, 0xea, 0xe4, 0x7f, 0x70, 0xef, 0xe4, 0x62, + 0x70, 0x38, 0x1a, 0x17, 0x1e, 0x5b, 0xf7, 0x07, 0x68, 0x60, 0x46, 0x3e, 0xfd, 0xaf, 0xad, 0xb7, + 0x0f, 0xe6, 0x44, 0xed, 0x90, 0x75, 0xde, 0xf6, 0x9d, 0xaf, 0xc9, 0x02, 0x12, 0xda, 0xd1, 0xce, + 0x9b, 0xdb, 0x3d, 0xed, 0xb7, 0xdb, 0x3d, 0xed, 0xcf, 0xdb, 0x3d, 0xed, 0x7b, 0x53, 0xb1, 0xa3, + 0xab, 0x2b, 0x13, 0x7f, 0x71, 0x3e, 0xff, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xfb, 0x5f, 0xf2, 0x4d, + 0x13, 0x09, 0x00, 0x00, } func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { @@ -990,6 +1361,251 @@ func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *Histogram) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Histogram) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x78 + } + if m.ResetHint != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ResetHint)) + i-- + dAtA[i] = 0x70 + } + if len(m.PositiveCounts) > 0 { + for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- { + f1 := math.Float64bits(float64(m.PositiveCounts[iNdEx])) + i -= 8 + 
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.PositiveCounts)*8)) + i-- + dAtA[i] = 0x6a + } + if len(m.PositiveDeltas) > 0 { + var j2 int + dAtA4 := make([]byte, len(m.PositiveDeltas)*10) + for _, num := range m.PositiveDeltas { + x3 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x3 >= 1<<7 { + dAtA4[j2] = uint8(uint64(x3)&0x7f | 0x80) + j2++ + x3 >>= 7 + } + dAtA4[j2] = uint8(x3) + j2++ + } + i -= j2 + copy(dAtA[i:], dAtA4[:j2]) + i = encodeVarintTypes(dAtA, i, uint64(j2)) + i-- + dAtA[i] = 0x62 + } + if len(m.PositiveSpans) > 0 { + for iNdEx := len(m.PositiveSpans) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PositiveSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } + if len(m.NegativeCounts) > 0 { + for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- { + f5 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f5)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.NegativeCounts)*8)) + i-- + dAtA[i] = 0x52 + } + if len(m.NegativeDeltas) > 0 { + var j6 int + dAtA8 := make([]byte, len(m.NegativeDeltas)*10) + for _, num := range m.NegativeDeltas { + x7 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x7 >= 1<<7 { + dAtA8[j6] = uint8(uint64(x7)&0x7f | 0x80) + j6++ + x7 >>= 7 + } + dAtA8[j6] = uint8(x7) + j6++ + } + i -= j6 + copy(dAtA[i:], dAtA8[:j6]) + i = encodeVarintTypes(dAtA, i, uint64(j6)) + i-- + dAtA[i] = 0x4a + } + if len(m.NegativeSpans) > 0 { + for iNdEx := len(m.NegativeSpans) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NegativeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.ZeroCount != nil { + { + size := m.ZeroCount.Size() + i -= size + if _, err := m.ZeroCount.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.ZeroThreshold != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold)))) + i-- + dAtA[i] = 0x29 + } + if m.Schema != 0 { + i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31)))) + i-- + dAtA[i] = 0x20 + } + if m.Sum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) + i-- + dAtA[i] = 0x19 + } + if m.Count != nil { + { + size := m.Count.Size() + i -= size + if _, err := m.Count.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Histogram_CountInt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_CountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintTypes(dAtA, i, uint64(m.CountInt)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Histogram_CountFloat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_CountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CountFloat)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountInt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_ZeroCountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintTypes(dAtA, i, uint64(m.ZeroCountInt)) + i-- + dAtA[i] = 0x30 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountFloat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_ZeroCountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat)))) + i-- + dAtA[i] = 0x39 + return len(dAtA) - i, nil +} +func (m *BucketSpan) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Length != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x10 + } + if m.Offset != 0 { + i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31)))) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *TimeSeries) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1014,6 +1630,20 @@ func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Histograms) > 0 { + for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } if len(m.Exemplars) > 0 { for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { { @@ -1444,6 +2074,125 @@ func (m *Exemplar) Size() (n int) { return n } +func (m *Histogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != nil { + n += m.Count.Size() + } + if m.Sum != 0 { + n += 9 + } + if m.Schema != 0 { + n += 1 + sozTypes(uint64(m.Schema)) + } + if m.ZeroThreshold != 0 { + n += 9 + } + if m.ZeroCount != nil { + n += m.ZeroCount.Size() + } + if len(m.NegativeSpans) > 0 { + for _, e := range m.NegativeSpans { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.NegativeDeltas) > 0 { + l = 0 + for _, e := range m.NegativeDeltas { + l += sozTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.NegativeCounts) > 0 { + n += 1 + sovTypes(uint64(len(m.NegativeCounts)*8)) + len(m.NegativeCounts)*8 + } + if len(m.PositiveSpans) > 0 { + for _, e := range m.PositiveSpans { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.PositiveDeltas) > 0 { + l = 0 + for _, e := range m.PositiveDeltas { + l += sozTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.PositiveCounts) > 0 { + n += 1 + sovTypes(uint64(len(m.PositiveCounts)*8)) + len(m.PositiveCounts)*8 + } + if m.ResetHint != 0 { + n += 1 + sovTypes(uint64(m.ResetHint)) + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func 
(m *Histogram_CountInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovTypes(uint64(m.CountInt)) + return n +} +func (m *Histogram_CountFloat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Histogram_ZeroCountInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovTypes(uint64(m.ZeroCountInt)) + return n +} +func (m *Histogram_ZeroCountFloat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *BucketSpan) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + sozTypes(uint64(m.Offset)) + } + if m.Length != 0 { + n += 1 + sovTypes(uint64(m.Length)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *TimeSeries) Size() (n int) { if m == nil { return 0 @@ -1468,6 +2217,12 @@ func (m *TimeSeries) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } + if len(m.Histograms) > 0 { + for _, e := range m.Histograms { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1989,6 +2744,623 @@ func (m *Exemplar) Unmarshal(dAtA []byte) error { } return nil } +func (m *Histogram) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Histogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CountInt", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Count = &Histogram_CountInt{v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Count = &Histogram_CountFloat{float64(math.Float64frombits(v))} + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Sum = float64(math.Float64frombits(v)) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Schema = v + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType) + } + var v uint64 + if 
(iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroThreshold = float64(math.Float64frombits(v)) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountInt", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ZeroCount = &Histogram_ZeroCountInt{v} + case 7: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroCount = &Histogram_ZeroCountFloat{float64(math.Float64frombits(v))} + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NegativeSpans = append(m.NegativeSpans, BucketSpan{}) + if err := m.NegativeSpans[len(m.NegativeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.NegativeDeltas) == 0 { + m.NegativeDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeDeltas", wireType) + } + case 10: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = 
append(m.NegativeCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.NegativeCounts) == 0 { + m.NegativeCounts = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeCounts", wireType) + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PositiveSpans = append(m.PositiveSpans, BucketSpan{}) + if err := m.PositiveSpans[len(m.PositiveSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PositiveDeltas) == 0 { + m.PositiveDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveDeltas", wireType) + } + case 13: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = 
uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.PositiveCounts) == 0 { + m.PositiveCounts = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveCounts", wireType) + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResetHint", wireType) + } + m.ResetHint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResetHint |= Histogram_ResetHint(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BucketSpan) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Offset = v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *TimeSeries) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -2120,6 +3492,40 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Histograms = append(m.Histograms, Histogram{}) + if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/prompb/types.proto b/prompb/types.proto index 6ba7074a5d..aa322515c3 100644 --- a/prompb/types.proto +++ b/prompb/types.proto @@ -54,13 +54,79 @@ message Exemplar { int64 timestamp = 3; } +// A native histogram, also known as a sparse histogram. +// Original design doc: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit +// The appendix of this design doc also explains the concept of float +// histograms. This Histogram message can represent both, the usual +// integer histogram as well as a float histogram. 
+message Histogram { + enum ResetHint { + UNKNOWN = 0; // Need to test for a counter reset explicitly. + YES = 1; // This is the 1st histogram after a counter reset. + NO = 2; // There was no counter reset between this and the previous Histogram. + GAUGE = 3; // This is a gauge histogram where counter resets don't happen. + } + + oneof count { // Count of observations in the histogram. + uint64 count_int = 1; + double count_float = 2; + } + double sum = 3; // Sum of observations in the histogram. + // The schema defines the bucket schema. Currently, valid numbers + // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1 + // is a bucket boundary in each case, and then each power of two is + // divided into 2^n logarithmic buckets. Or in other words, each + // bucket boundary is the previous boundary times 2^(2^-n). In the + // future, more bucket schemas may be added using numbers < -4 or > + // 8. + sint32 schema = 4; + double zero_threshold = 5; // Breadth of the zero bucket. + oneof zero_count { // Count in zero bucket. + uint64 zero_count_int = 6; + double zero_count_float = 7; + } + + // Negative Buckets. + repeated BucketSpan negative_spans = 8 [(gogoproto.nullable) = false]; + // Use either "negative_deltas" or "negative_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + repeated sint64 negative_deltas = 9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double negative_counts = 10; // Absolute count of each bucket. + + // Positive Buckets. + repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false]; + // Use either "positive_deltas" or "positive_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double positive_counts = 13; // Absolute count of each bucket. + + ResetHint reset_hint = 14; + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. + int64 timestamp = 15; +} + +// A BucketSpan defines a number of consecutive buckets with their +// offset. Logically, it would be more straightforward to include the +// bucket counts in the Span. However, the protobuf representation is +// more compact in the way the data is structured here (with all the +// buckets in a single array separate from the Spans). +message BucketSpan { + sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative). + uint32 length = 2; // Length of consecutive buckets. +} + // TimeSeries represents samples and labels for a single time series. message TimeSeries { // For a timeseries to be valid, and for the samples and exemplars // to be ingested by the remote system properly, the labels field is required. - repeated Label labels = 1 [(gogoproto.nullable) = false]; - repeated Sample samples = 2 [(gogoproto.nullable) = false]; - repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false]; + repeated Label labels = 1 [(gogoproto.nullable) = false]; + repeated Sample samples = 2 [(gogoproto.nullable) = false]; + repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false]; + repeated Histogram histograms = 4 [(gogoproto.nullable) = false]; } message Label { @@ -103,8 +169,10 @@ message Chunk { // We require this to match chunkenc.Encoding. 
enum Encoding { - UNKNOWN = 0; - XOR = 1; + UNKNOWN = 0; + XOR = 1; + HISTOGRAM = 2; + FLOAT_HISTOGRAM = 3; } Encoding type = 3; bytes data = 4; diff --git a/promql/bench_test.go b/promql/bench_test.go index c3de6ca47d..6818498bf8 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -27,17 +27,7 @@ import ( "github.com/prometheus/prometheus/util/teststorage" ) -func BenchmarkRangeQuery(b *testing.B) { - stor := teststorage.New(b) - defer stor.Close() - opts := EngineOpts{ - Logger: nil, - Reg: nil, - MaxSamples: 50000000, - Timeout: 100 * time.Second, - } - engine := NewEngine(opts) - +func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *Engine, interval, numIntervals int) error { metrics := []labels.Labels{} metrics = append(metrics, labels.FromStrings("__name__", "a_one")) metrics = append(metrics, labels.FromStrings("__name__", "b_one")) @@ -65,25 +55,26 @@ func BenchmarkRangeQuery(b *testing.B) { } refs := make([]storage.SeriesRef, len(metrics)) - // A day of data plus 10k steps. - numIntervals := 8640 + 10000 - for s := 0; s < numIntervals; s++ { a := stor.Appender(context.Background()) - ts := int64(s * 10000) // 10s interval. + ts := int64(s * interval) for i, metric := range metrics { ref, _ := a.Append(refs[i], metric, ts, float64(s)+float64(i)/float64(len(metrics))) refs[i] = ref } if err := a.Commit(); err != nil { - b.Fatal(err) + return err } } + return nil +} - type benchCase struct { - expr string - steps int - } +type benchCase struct { + expr string + steps int +} + +func rangeQueryCases() []benchCase { cases := []benchCase{ // Plain retrieval. { @@ -166,6 +157,9 @@ func BenchmarkRangeQuery(b *testing.B) { { expr: "topk(1, a_X)", }, + { + expr: "topk(5, a_X)", + }, // Combinations. { expr: "rate(a_X[1m]) + rate(b_X[1m])", @@ -183,6 +177,15 @@ func BenchmarkRangeQuery(b *testing.B) { { expr: "a_X + on(l) group_right a_one", }, + // Label compared to blank string. + { + expr: "count({__name__!=\"\"})", + steps: 1, + }, + { + expr: "count({__name__!=\"\",l=\"\"})", + steps: 1, + }, } // X in an expr will be replaced by different metric sizes. @@ -191,9 +194,9 @@ func BenchmarkRangeQuery(b *testing.B) { if !strings.Contains(c.expr, "X") { tmp = append(tmp, c) } else { - tmp = append(tmp, benchCase{expr: strings.Replace(c.expr, "X", "one", -1), steps: c.steps}) - tmp = append(tmp, benchCase{expr: strings.Replace(c.expr, "X", "ten", -1), steps: c.steps}) - tmp = append(tmp, benchCase{expr: strings.Replace(c.expr, "X", "hundred", -1), steps: c.steps}) + tmp = append(tmp, benchCase{expr: strings.ReplaceAll(c.expr, "X", "one"), steps: c.steps}) + tmp = append(tmp, benchCase{expr: strings.ReplaceAll(c.expr, "X", "ten"), steps: c.steps}) + tmp = append(tmp, benchCase{expr: strings.ReplaceAll(c.expr, "X", "hundred"), steps: c.steps}) } } cases = tmp @@ -210,20 +213,44 @@ func BenchmarkRangeQuery(b *testing.B) { tmp = append(tmp, benchCase{expr: c.expr, steps: 1000}) } } - cases = tmp + return tmp +} + +func BenchmarkRangeQuery(b *testing.B) { + stor := teststorage.New(b) + defer stor.Close() + opts := EngineOpts{ + Logger: nil, + Reg: nil, + MaxSamples: 50000000, + Timeout: 100 * time.Second, + } + engine := NewEngine(opts) + + const interval = 10000 // 10s interval. + // A day of data plus 10k steps. 
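+	// (A day at the 10 s interval above is 86400/10 = 8640 intervals; the extra 10000 supply data for the 10k query steps.)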
+ numIntervals := 8640 + 10000 + + err := setupRangeQueryTestData(stor, engine, interval, numIntervals) + if err != nil { + b.Fatal(err) + } + cases := rangeQueryCases() + for _, c := range cases { name := fmt.Sprintf("expr=%s,steps=%d", c.expr, c.steps) b.Run(name, func(b *testing.B) { + ctx := context.Background() b.ReportAllocs() for i := 0; i < b.N; i++ { qry, err := engine.NewRangeQuery( - stor, nil, c.expr, + ctx, stor, nil, c.expr, time.Unix(int64((numIntervals-c.steps)*10), 0), time.Unix(int64(numIntervals*10), 0), time.Second*10) if err != nil { b.Fatal(err) } - res := qry.Exec(context.Background()) + res := qry.Exec(ctx) if res.Err != nil { b.Fatal(res.Err) } diff --git a/promql/engine.go b/promql/engine.go index e7da2a96fb..83bbdeff89 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -37,12 +37,15 @@ import ( "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slices" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/util/stats" + "github.com/prometheus/prometheus/util/zeropool" ) const ( @@ -67,6 +70,7 @@ type engineMetrics struct { queryPrepareTime prometheus.Observer queryInnerEval prometheus.Observer queryResultSort prometheus.Observer + querySamples prometheus.Counter } // convertibleToInt64 returns true if v does not over-/underflow an int64. @@ -126,11 +130,35 @@ type Query interface { String() string } -type QueryOpts struct { +type PrometheusQueryOpts struct { // Enables recording per-step statistics if the engine has it enabled as well. Disabled by default. - EnablePerStepStats bool + enablePerStepStats bool // Lookback delta duration for this query. - LookbackDelta time.Duration + lookbackDelta time.Duration +} + +var _ QueryOpts = &PrometheusQueryOpts{} + +func NewPrometheusQueryOpts(enablePerStepStats bool, lookbackDelta time.Duration) QueryOpts { + return &PrometheusQueryOpts{ + enablePerStepStats: enablePerStepStats, + lookbackDelta: lookbackDelta, + } +} + +func (p *PrometheusQueryOpts) EnablePerStepStats() bool { + return p.enablePerStepStats +} + +func (p *PrometheusQueryOpts) LookbackDelta() time.Duration { + return p.lookbackDelta +} + +type QueryOpts interface { + // Enables recording per-step statistics if the engine has it enabled as well. Disabled by default. + EnablePerStepStats() bool + // Lookback delta duration for this query. + LookbackDelta() time.Duration } // query implements the Query interface. @@ -186,7 +214,8 @@ func (q *query) Cancel() { // Close implements the Query interface. func (q *query) Close() { for _, s := range q.matrix { - putPointSlice(s.Points) + putFPointSlice(s.Floats) + putHPointSlice(s.Histograms) } } @@ -198,7 +227,6 @@ func (q *query) Exec(ctx context.Context) *Result { // Exec query. 
res, warnings, err := q.ng.exec(ctx, q) - return &Result{Err: err, Value: res, Warnings: warnings} } @@ -330,6 +358,12 @@ func NewEngine(opts EngineOpts) *Engine { Name: "queries_concurrent_max", Help: "The max number of concurrent queries.", }), + querySamples: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "query_samples_total", + Help: "The total number of samples loaded by all queries.", + }), queryQueueTime: queryResultSummary.WithLabelValues("queue_time"), queryPrepareTime: queryResultSummary.WithLabelValues("prepare_time"), queryInnerEval: queryResultSummary.WithLabelValues("inner_eval"), @@ -355,6 +389,7 @@ func NewEngine(opts EngineOpts) *Engine { metrics.maxConcurrentQueries, metrics.queryLogEnabled, metrics.queryLogFailures, + metrics.querySamples, queryResultSummary, ) } @@ -397,69 +432,74 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) { } // NewInstantQuery returns an evaluation query for the given expression at the given time. -func (ng *Engine) NewInstantQuery(q storage.Queryable, opts *QueryOpts, qs string, ts time.Time) (Query, error) { +func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, ts time.Time) (Query, error) { + pExpr, qry := ng.newQuery(q, qs, opts, ts, ts, 0) + finishQueue, err := ng.queueActive(ctx, qry) + if err != nil { + return nil, err + } + defer finishQueue() expr, err := parser.ParseExpr(qs) if err != nil { return nil, err } - qry, err := ng.newQuery(q, opts, expr, ts, ts, 0) - if err != nil { + if err := ng.validateOpts(expr); err != nil { return nil, err } - qry.q = qs + *pExpr = PreprocessExpr(expr, ts, ts) return qry, nil } // NewRangeQuery returns an evaluation query for the given time range and with // the resolution set by the interval. -func (ng *Engine) NewRangeQuery(q storage.Queryable, opts *QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { +func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error) { + pExpr, qry := ng.newQuery(q, qs, opts, start, end, interval) + finishQueue, err := ng.queueActive(ctx, qry) + if err != nil { + return nil, err + } + defer finishQueue() expr, err := parser.ParseExpr(qs) if err != nil { return nil, err } + if err := ng.validateOpts(expr); err != nil { + return nil, err + } if expr.Type() != parser.ValueTypeVector && expr.Type() != parser.ValueTypeScalar { return nil, fmt.Errorf("invalid expression type %q for range query, must be Scalar or instant Vector", parser.DocumentedType(expr.Type())) } - qry, err := ng.newQuery(q, opts, expr, start, end, interval) - if err != nil { - return nil, err - } - qry.q = qs + *pExpr = PreprocessExpr(expr, start, end) return qry, nil } -func (ng *Engine) newQuery(q storage.Queryable, opts *QueryOpts, expr parser.Expr, start, end time.Time, interval time.Duration) (*query, error) { - if err := ng.validateOpts(expr); err != nil { - return nil, err - } - - // Default to empty QueryOpts if not provided. 
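Since QueryOpts is now an interface built via NewPrometheusQueryOpts, and both NewInstantQuery and NewRangeQuery take a context up front (queueing against the active-query tracker now starts at construction time), callers change shape. A minimal caller-side sketch, assuming only the signatures visible in this diff; runRange and its parameters are illustrative:

package example

import (
	"context"
	"fmt"
	"time"

	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/storage"
)

func runRange(ctx context.Context, engine *promql.Engine, q storage.Queryable, expr string, start, end time.Time) error {
	// false: no per-step stats; 0: fall back to the engine's lookback delta.
	opts := promql.NewPrometheusQueryOpts(false, 0)
	qry, err := engine.NewRangeQuery(ctx, q, opts, expr, start, end, 15*time.Second)
	if err != nil {
		return err
	}
	// Close returns the result's point slices to the internal pools, so it
	// should run only after the result has been consumed.
	defer qry.Close()

	res := qry.Exec(ctx)
	if res.Err != nil {
		return res.Err
	}
	fmt.Println(res.Value)
	return nil
}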
+func (ng *Engine) newQuery(q storage.Queryable, qs string, opts QueryOpts, start, end time.Time, interval time.Duration) (*parser.Expr, *query) { if opts == nil { - opts = &QueryOpts{} + opts = NewPrometheusQueryOpts(false, 0) } - lookbackDelta := opts.LookbackDelta + lookbackDelta := opts.LookbackDelta() if lookbackDelta <= 0 { lookbackDelta = ng.lookbackDelta } es := &parser.EvalStmt{ - Expr: PreprocessExpr(expr, start, end), Start: start, End: end, Interval: interval, LookbackDelta: lookbackDelta, } qry := &query{ + q: qs, stmt: es, ng: ng, stats: stats.NewQueryTimers(), - sampleStats: stats.NewQuerySamples(ng.enablePerStepStats && opts.EnablePerStepStats), + sampleStats: stats.NewQuerySamples(ng.enablePerStepStats && opts.EnablePerStepStats()), queryable: q, } - return qry, nil + return &es.Expr, qry } var ( @@ -535,7 +575,10 @@ func (ng *Engine) newTestQuery(f func(context.Context) error) Query { // statements are not handled by the Engine. func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storage.Warnings, err error) { ng.metrics.currentQueries.Inc() - defer ng.metrics.currentQueries.Dec() + defer func() { + ng.metrics.currentQueries.Dec() + ng.metrics.querySamples.Add(float64(q.sampleStats.TotalSamples)) + }() ctx, cancel := context.WithTimeout(ctx, ng.timeout) q.cancel = cancel @@ -575,18 +618,11 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storag execSpanTimer, ctx := q.stats.GetSpanTimer(ctx, stats.ExecTotalTime) defer execSpanTimer.Finish() - queueSpanTimer, _ := q.stats.GetSpanTimer(ctx, stats.ExecQueueTime, ng.metrics.queryQueueTime) - // Log query in active log. The active log guarantees that we don't run over - // MaxConcurrent queries. - if ng.activeQueryTracker != nil { - queryIndex, err := ng.activeQueryTracker.Insert(ctx, q.q) - if err != nil { - queueSpanTimer.Finish() - return nil, nil, contextErr(err, "query queue") - } - defer ng.activeQueryTracker.Delete(queryIndex) + finishQueue, err := ng.queueActive(ctx, q) + if err != nil { + return nil, nil, err } - queueSpanTimer.Finish() + defer finishQueue() // Cancel when execution is done or an error was raised. defer q.cancel() @@ -609,6 +645,18 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storag panic(fmt.Errorf("promql.Engine.exec: unhandled statement of type %T", q.Statement())) } +// Log query in active log. The active log guarantees that we don't run over +// MaxConcurrent queries. 
+func (ng *Engine) queueActive(ctx context.Context, q *query) (func(), error) { + if ng.activeQueryTracker == nil { + return func() {}, nil + } + queueSpanTimer, _ := q.stats.GetSpanTimer(ctx, stats.ExecQueueTime, ng.metrics.queryQueueTime) + queryIndex, err := ng.activeQueryTracker.Insert(ctx, q.q) + queueSpanTimer.Finish() + return func() { ng.activeQueryTracker.Delete(queryIndex) }, err +} + func timeMilliseconds(t time.Time) int64 { return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) } @@ -652,12 +700,13 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval query.sampleStats.InitStepTracking(start, start, 1) val, warnings, err := evaluator.Eval(s.Expr) + + evalSpanTimer.Finish() + if err != nil { return nil, warnings, err } - evalSpanTimer.Finish() - var mat Matrix switch result := val.(type) { @@ -677,11 +726,15 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval for i, s := range mat { // Point might have a different timestamp, force it to the evaluation // timestamp as that is when we ran the evaluation. - vector[i] = Sample{Metric: s.Metric, Point: Point{V: s.Points[0].V, T: start}} + if len(s.Histograms) > 0 { + vector[i] = Sample{Metric: s.Metric, H: s.Histograms[0].H, T: start} + } else { + vector[i] = Sample{Metric: s.Metric, F: s.Floats[0].F, T: start} + } } return vector, warnings, nil case parser.ValueTypeScalar: - return Scalar{V: mat[0].Points[0].V, T: start}, warnings, nil + return Scalar{V: mat[0].Floats[0].F, T: start}, warnings, nil case parser.ValueTypeMatrix: return mat, warnings, nil default: @@ -703,10 +756,12 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval } query.sampleStats.InitStepTracking(evaluator.startTimestamp, evaluator.endTimestamp, evaluator.interval) val, warnings, err := evaluator.Eval(s.Expr) + + evalSpanTimer.Finish() + if err != nil { return nil, warnings, err } - evalSpanTimer.Finish() mat, ok := val.(Matrix) if !ok { @@ -736,8 +791,7 @@ func subqueryTimes(path []parser.Node) (time.Duration, time.Duration, *int64) { ts int64 = math.MaxInt64 ) for _, node := range path { - switch n := node.(type) { - case *parser.SubqueryExpr: + if n, ok := node.(*parser.SubqueryExpr); ok { subqOffset += n.OriginalOffset subqRange += n.Range if n.Timestamp != nil { @@ -773,7 +827,6 @@ func (ng *Engine) findMinMaxTime(s *parser.EvalStmt) (int64, int64) { maxTimestamp = end } evalRange = 0 - case *parser.MatrixSelector: evalRange = n.Range } @@ -806,20 +859,20 @@ func (ng *Engine) getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorS } else { offsetMilliseconds := durationMilliseconds(subqOffset) start = start - offsetMilliseconds - durationMilliseconds(subqRange) - end = end - offsetMilliseconds + end -= offsetMilliseconds } if evalRange == 0 { - start = start - durationMilliseconds(s.LookbackDelta) + start -= durationMilliseconds(s.LookbackDelta) } else { // For all matrix queries we want to ensure that we have (end-start) + range selected // this way we have `range` data before the start time - start = start - durationMilliseconds(evalRange) + start -= durationMilliseconds(evalRange) } offsetMilliseconds := durationMilliseconds(n.OriginalOffset) - start = start - offsetMilliseconds - end = end - offsetMilliseconds + start -= offsetMilliseconds + end -= offsetMilliseconds return start, end } @@ -827,8 +880,7 @@ func (ng *Engine) getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorS func (ng *Engine) getLastSubqueryInterval(path []parser.Node) 
time.Duration { var interval time.Duration for _, node := range path { - switch n := node.(type) { - case *parser.SubqueryExpr: + if n, ok := node.(*parser.SubqueryExpr); ok { interval = n.Step if n.Step == 0 { interval = time.Duration(ng.noStepSubqueryIntervalFn(durationMilliseconds(n.Range))) * time.Millisecond @@ -894,8 +946,7 @@ func extractGroupsFromPath(p []parser.Node) (bool, []string) { if len(p) == 0 { return false, nil } - switch n := p[len(p)-1].(type) { - case *parser.AggregateExpr: + if n, ok := p[len(p)-1].(*parser.AggregateExpr); ok { return !n.Without, n.Grouping } return false, nil @@ -935,9 +986,10 @@ type errWithWarnings struct { func (e errWithWarnings) Error() string { return e.err.Error() } -// An evaluator evaluates given expressions over given fixed timestamps. It -// is attached to an engine through which it connects to a querier and reports -// errors. On timeout or cancellation of its context it terminates. +// An evaluator evaluates the given expressions over the given fixed +// timestamps. It is attached to an engine through which it connects to a +// querier and reports errors. On timeout or cancellation of its context it +// terminates. type evaluator struct { ctx context.Context @@ -981,8 +1033,10 @@ func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error case errWithWarnings: *errp = err.err *ws = append(*ws, err.warnings...) + case error: + *errp = err default: - *errp = e.(error) + *errp = fmt.Errorf("%v", err) } } @@ -1011,7 +1065,7 @@ type EvalNodeHelper struct { // Caches. // DropMetricName and label_*. Dmn map[uint64]labels.Labels - // funcHistogramQuantile. + // funcHistogramQuantile for conventional histograms. signatureToMetricWithBuckets map[string]*metricWithBuckets // label_replace. regex *regexp.Regexp @@ -1026,6 +1080,14 @@ type EvalNodeHelper struct { resultMetric map[string]labels.Labels } +func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) { + if enh.lb == nil { + enh.lb = labels.NewBuilder(lbls) + } else { + enh.lb.Reset(lbls) + } +} + // DropMetricName is a cached version of DropMetricName. func (enh *EvalNodeHelper) DropMetricName(l labels.Labels) labels.Labels { if enh.Dmn == nil { @@ -1122,17 +1184,35 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) } for si, series := range matrixes[i] { - for _, point := range series.Points { + for _, point := range series.Floats { if point.T == ts { if ev.currentSamples < ev.maxSamples { - vectors[i] = append(vectors[i], Sample{Metric: series.Metric, Point: point}) + vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: point.F, T: ts}) if prepSeries != nil { bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si]) } // Move input vectors forward so we don't have to re-scan the same // past points at the next step. - matrixes[i][si].Points = series.Points[1:] + matrixes[i][si].Floats = series.Floats[1:] + ev.currentSamples++ + } else { + ev.error(ErrTooManySamples(env)) + } + } + break + } + for _, point := range series.Histograms { + if point.T == ts { + if ev.currentSamples < ev.maxSamples { + vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: point.H, T: ts}) + if prepSeries != nil { + bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si]) + } + + // Move input vectors forward so we don't have to re-scan the same + // past points at the next step. 
+ matrixes[i][si].Histograms = series.Histograms[1:] ev.currentSamples++ } else { ev.error(ErrTooManySamples(env)) @@ -1169,8 +1249,11 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) if ev.endTimestamp == ev.startTimestamp { mat := make(Matrix, len(result)) for i, s := range result { - s.Point.T = ts - mat[i] = Series{Metric: s.Metric, Points: []Point{s.Point}} + if s.H == nil { + mat[i] = Series{Metric: s.Metric, Floats: []FPoint{{T: ts, F: s.F}}} + } else { + mat[i] = Series{Metric: s.Metric, Histograms: []HPoint{{T: ts, H: s.H}}} + } } ev.currentSamples = originalNumSamples + mat.TotalSamples() ev.samplesStats.UpdatePeak(ev.currentSamples) @@ -1182,22 +1265,28 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) h := sample.Metric.Hash() ss, ok := seriess[h] if !ok { - ss = Series{ - Metric: sample.Metric, - Points: getPointSlice(numSteps), - } + ss = Series{Metric: sample.Metric} + } + if sample.H == nil { + if ss.Floats == nil { + ss.Floats = getFPointSlice(numSteps) + } + ss.Floats = append(ss.Floats, FPoint{T: ts, F: sample.F}) + } else { + if ss.Histograms == nil { + ss.Histograms = getHPointSlice(numSteps) + } + ss.Histograms = append(ss.Histograms, HPoint{T: ts, H: sample.H}) } - sample.Point.T = ts - ss.Points = append(ss.Points, sample.Point) seriess[h] = ss - } } // Reuse the original point slices. for _, m := range origMatrixes { for _, s := range m { - putPointSlice(s.Points) + putFPointSlice(s.Floats) + putHPointSlice(s.Histograms) } } // Assemble the output matrix. By the time we get here we know we don't have too many samples. @@ -1238,7 +1327,7 @@ func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSele } totalSamples := 0 for _, s := range mat { - totalSamples += len(s.Points) + totalSamples += len(s.Floats) + len(s.Histograms) vs.Series = append(vs.Series, NewStorageSeries(s)) } return ms, totalSamples, ws @@ -1282,7 +1371,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { var param float64 if e.Param != nil { - param = v[0].(Vector)[0].V + param = v[0].(Vector)[0].F } return ev.aggregation(e.Op, sortedGrouping, e.Without, param, v[1].(Vector), sh[1], enh), nil }, e.Param, e.Expr) @@ -1381,16 +1470,24 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { stepRange = ev.interval } // Reuse objects across steps to save memory allocations. - points := getPointSlice(16) + var floats []FPoint + var histograms []HPoint inMatrix := make(Matrix, 1) inArgs[matrixArgIndex] = inMatrix enh := &EvalNodeHelper{Out: make(Vector, 0, 1)} // Process all the calls for one time series at a time. it := storage.NewBuffer(selRange) + var chkIter chunkenc.Iterator for i, s := range selVS.Series { - ev.currentSamples -= len(points) - points = points[:0] - it.Reset(s.Iterator()) + ev.currentSamples -= len(floats) + len(histograms) + if floats != nil { + floats = floats[:0] + } + if histograms != nil { + histograms = histograms[:0] + } + chkIter = s.Iterator(chkIter) + it.Reset(chkIter) metric := selVS.Series[i].Labels() // The last_over_time function acts like offset; thus, it // should keep the metric name. 
For all the other range @@ -1401,7 +1498,6 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } ss := Series{ Metric: metric, - Points: getPointSlice(numSteps), } inMatrix[0].Metric = selVS.Series[i].Labels() for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { @@ -1411,44 +1507,54 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { // when looking up the argument, as there will be no gaps. for j := range e.Args { if j != matrixArgIndex { - otherInArgs[j][0].V = otherArgs[j][0].Points[step].V + otherInArgs[j][0].F = otherArgs[j][0].Floats[step].F } } maxt := ts - offset mint := maxt - selRange // Evaluate the matrix selector for this series for this step. - points = ev.matrixIterSlice(it, mint, maxt, points) - if len(points) == 0 { + floats, histograms = ev.matrixIterSlice(it, mint, maxt, floats, histograms) + if len(floats)+len(histograms) == 0 { continue } - inMatrix[0].Points = points + inMatrix[0].Floats = floats + inMatrix[0].Histograms = histograms enh.Ts = ts // Make the function call. outVec := call(inArgs, e.Args, enh) - ev.samplesStats.IncrementSamplesAtStep(step, int64(len(points))) + ev.samplesStats.IncrementSamplesAtStep(step, int64(len(floats)+len(histograms))) enh.Out = outVec[:0] if len(outVec) > 0 { - ss.Points = append(ss.Points, Point{V: outVec[0].Point.V, T: ts}) + if outVec[0].H == nil { + if ss.Floats == nil { + ss.Floats = getFPointSlice(numSteps) + } + ss.Floats = append(ss.Floats, FPoint{F: outVec[0].F, T: ts}) + } else { + if ss.Histograms == nil { + ss.Histograms = getHPointSlice(numSteps) + } + ss.Histograms = append(ss.Histograms, HPoint{H: outVec[0].H, T: ts}) + } } // Only buffer stepRange milliseconds from the second step on. it.ReduceDelta(stepRange) } - if len(ss.Points) > 0 { - if ev.currentSamples+len(ss.Points) <= ev.maxSamples { + if len(ss.Floats)+len(ss.Histograms) > 0 { + if ev.currentSamples+len(ss.Floats)+len(ss.Histograms) <= ev.maxSamples { mat = append(mat, ss) - ev.currentSamples += len(ss.Points) + ev.currentSamples += len(ss.Floats) + len(ss.Histograms) } else { ev.error(ErrTooManySamples(env)) } - } else { - putPointSlice(ss.Points) } ev.samplesStats.UpdatePeak(ev.currentSamples) } ev.samplesStats.UpdatePeak(ev.currentSamples) - ev.currentSamples -= len(points) - putPointSlice(points) + ev.currentSamples -= len(floats) + len(histograms) + putFPointSlice(floats) + putHPointSlice(histograms) // The absent_over_time function returns 0 or 1 series. So far, the matrix // contains multiple series. The following code will create a new series @@ -1457,7 +1563,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { steps := int(1 + (ev.endTimestamp-ev.startTimestamp)/ev.interval) // Iterate once to look for a complete series. 
for _, s := range mat { - if len(s.Points) == steps { + if len(s.Floats)+len(s.Histograms) == steps { return Matrix{}, warnings } } @@ -1465,7 +1571,10 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { found := map[int64]struct{}{} for i, s := range mat { - for _, p := range s.Points { + for _, p := range s.Floats { + found[p.T] = struct{}{} + } + for _, p := range s.Histograms { found[p.T] = struct{}{} } if i > 0 && len(found) == steps { @@ -1473,17 +1582,17 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } } - newp := make([]Point, 0, steps-len(found)) + newp := make([]FPoint, 0, steps-len(found)) for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { if _, ok := found[ts]; !ok { - newp = append(newp, Point{T: ts, V: 1}) + newp = append(newp, FPoint{T: ts, F: 1}) } } return Matrix{ Series{ Metric: createLabelsForAbsentFunction(e.Args[0]), - Points: newp, + Floats: newp, }, }, warnings } @@ -1503,8 +1612,8 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { if e.Op == parser.SUB { for i := range mat { mat[i].Metric = dropMetricName(mat[i].Metric) - for j := range mat[i].Points { - mat[i].Points[j].V = -mat[i].Points[j].V + for j := range mat[i].Floats { + mat[i].Floats[j].F = -mat[i].Floats[j].F } } if mat.ContainsSameLabelset() { @@ -1517,8 +1626,8 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { switch lt, rt := e.LHS.Type(), e.RHS.Type(); { case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar: return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - val := scalarBinop(e.Op, v[0].(Vector)[0].Point.V, v[1].(Vector)[0].Point.V) - return append(enh.Out, Sample{Point: Point{V: val}}), nil + val := scalarBinop(e.Op, v[0].(Vector)[0].F, v[1].(Vector)[0].F) + return append(enh.Out, Sample{F: val}), nil }, e.LHS, e.RHS) case lt == parser.ValueTypeVector && rt == parser.ValueTypeVector: // Function to compute the join signature for each series. 
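Each of the hunks above repeats one convention: a Sample carries either a float in F or a native histogram in H, and H == nil is the discriminator. Condensed into a single helper over the FPoint/HPoint value types this change introduces (a sketch, not engine code):

package main

import "github.com/prometheus/prometheus/promql"

// appendSample shows the evaluator's branching rule: a nil H means a float
// sample that belongs in Series.Floats; otherwise the histogram belongs in
// Series.Histograms. A series may hold both slices, but every individual
// sample is exactly one of the two.
func appendSample(ss *promql.Series, s promql.Sample) {
	if s.H == nil {
		ss.Floats = append(ss.Floats, promql.FPoint{T: s.T, F: s.F})
	} else {
		ss.Histograms = append(ss.Histograms, promql.HPoint{T: s.T, H: s.H})
	}
}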
@@ -1548,18 +1657,18 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].Point.V}, false, e.ReturnBool, enh), nil + return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh), nil }, e.LHS, e.RHS) case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].Point.V}, true, e.ReturnBool, enh), nil + return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh), nil }, e.LHS, e.RHS) } case *parser.NumberLiteral: return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return append(enh.Out, Sample{Point: Point{V: e.Val}}), nil + return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil }) case *parser.StringLiteral: @@ -1572,19 +1681,30 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } mat := make(Matrix, 0, len(e.Series)) it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) + var chkIter chunkenc.Iterator for i, s := range e.Series { - it.Reset(s.Iterator()) + chkIter = s.Iterator(chkIter) + it.Reset(chkIter) ss := Series{ Metric: e.Series[i].Labels(), - Points: getPointSlice(numSteps), } for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { step++ - _, v, ok := ev.vectorSelectorSingle(it, e, ts) + _, f, h, ok := ev.vectorSelectorSingle(it, e, ts) if ok { if ev.currentSamples < ev.maxSamples { - ss.Points = append(ss.Points, Point{V: v, T: ts}) + if h == nil { + if ss.Floats == nil { + ss.Floats = getFPointSlice(numSteps) + } + ss.Floats = append(ss.Floats, FPoint{F: f, T: ts}) + } else { + if ss.Histograms == nil { + ss.Histograms = getHPointSlice(numSteps) + } + ss.Histograms = append(ss.Histograms, HPoint{H: h, T: ts}) + } ev.samplesStats.IncrementSamplesAtStep(step, 1) ev.currentSamples++ } else { @@ -1593,10 +1713,8 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } } - if len(ss.Points) > 0 { + if len(ss.Floats)+len(ss.Histograms) > 0 { mat = append(mat, ss) - } else { - putPointSlice(ss.Points) } } ev.samplesStats.UpdatePeak(ev.currentSamples) @@ -1668,7 +1786,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { res, ws := newEv.eval(e.Expr) ev.currentSamples = newEv.currentSamples ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats) - for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts = ts + ev.interval { + for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { step++ ev.samplesStats.IncrementSamplesAtStep(step, newEv.samplesStats.TotalSamples) } @@ -1687,14 +1805,21 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { panic(fmt.Errorf("unexpected result in StepInvariantExpr evaluation: %T", expr)) } for i := range mat { - if len(mat[i].Points) != 1 { + if len(mat[i].Floats)+len(mat[i].Histograms) != 1 { panic(fmt.Errorf("unexpected number of samples")) } - for ts := ev.startTimestamp + 
ev.interval; ts <= ev.endTimestamp; ts = ts + ev.interval { - mat[i].Points = append(mat[i].Points, Point{ - T: ts, - V: mat[i].Points[0].V, - }) + for ts := ev.startTimestamp + ev.interval; ts <= ev.endTimestamp; ts += ev.interval { + if len(mat[i].Floats) > 0 { + mat[i].Floats = append(mat[i].Floats, FPoint{ + T: ts, + F: mat[i].Floats[0].F, + }) + } else { + mat[i].Histograms = append(mat[i].Histograms, HPoint{ + T: ts, + H: mat[i].Histograms[0].H, + }) + } ev.currentSamples++ if ev.currentSamples > ev.maxSamples { ev.error(ErrTooManySamples(env)) @@ -1716,14 +1841,18 @@ func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vect } vec := make(Vector, 0, len(node.Series)) it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) + var chkIter chunkenc.Iterator for i, s := range node.Series { - it.Reset(s.Iterator()) + chkIter = s.Iterator(chkIter) + it.Reset(chkIter) - t, v, ok := ev.vectorSelectorSingle(it, node, ts) + t, f, h, ok := ev.vectorSelectorSingle(it, node, ts) if ok { vec = append(vec, Sample{ Metric: node.Series[i].Labels(), - Point: Point{V: v, T: t}, + T: t, + F: f, + H: h, }) ev.currentSamples++ @@ -1738,48 +1867,70 @@ func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vect return vec, ws } -// vectorSelectorSingle evaluates a instant vector for the iterator of one time series. -func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, node *parser.VectorSelector, ts int64) (int64, float64, bool) { +// vectorSelectorSingle evaluates an instant vector for the iterator of one time series. +func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, node *parser.VectorSelector, ts int64) ( + int64, float64, *histogram.FloatHistogram, bool, +) { refTime := ts - durationMilliseconds(node.Offset) var t int64 var v float64 + var h *histogram.FloatHistogram - ok := it.Seek(refTime) - if !ok { + valueType := it.Seek(refTime) + switch valueType { + case chunkenc.ValNone: if it.Err() != nil { ev.error(it.Err()) } - } - - if ok { + case chunkenc.ValFloat: t, v = it.At() + case chunkenc.ValFloatHistogram: + t, h = it.AtFloatHistogram() + default: + panic(fmt.Errorf("unknown value type %v", valueType)) } - - if !ok || t > refTime { - t, v, ok = it.PeekPrev() + if valueType == chunkenc.ValNone || t > refTime { + var ok bool + t, v, h, ok = it.PeekPrev() if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) { - return 0, 0, false + return 0, 0, nil, false } } - if value.IsStaleNaN(v) { - return 0, 0, false + if value.IsStaleNaN(v) || (h != nil && value.IsStaleNaN(h.Sum)) { + return 0, 0, nil, false } - return t, v, true + return t, v, h, true } -var pointPool = sync.Pool{} +var ( + fPointPool zeropool.Pool[[]FPoint] + hPointPool zeropool.Pool[[]HPoint] +) -func getPointSlice(sz int) []Point { - p := pointPool.Get() +func getFPointSlice(sz int) []FPoint { + if p := fPointPool.Get(); p != nil { + return p + } + return make([]FPoint, 0, sz) +} + +func putFPointSlice(p []FPoint) { if p != nil { - return p.([]Point) + fPointPool.Put(p[:0]) } - return make([]Point, 0, sz) } -func putPointSlice(p []Point) { - //nolint:staticcheck // Ignore SA6002 relax staticcheck verification. - pointPool.Put(p[:0]) +func getHPointSlice(sz int) []HPoint { + if p := hPointPool.Get(); p != nil { + return p + } + return make([]HPoint, 0, sz) +} + +func putHPointSlice(p []HPoint) { + if p != nil { + hPointPool.Put(p[:0]) + } } // matrixSelector evaluates a *parser.MatrixSelector expression. 
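The pools above switch from a bare sync.Pool to util/zeropool because sync.Pool boxes non-pointer values (such as slice headers) into its any interface, allocating on every Put — the very behavior the old //nolint:staticcheck SA6002 comment was suppressing. A minimal sketch of the idea, assuming nothing about the real zeropool internals beyond this:

package main

import "sync"

// slicePool sketches the zeropool trick for one concrete shape: store *[]T
// in the sync.Pool so that Get and Put do not box a slice header into an
// interface value. The real util/zeropool additionally recycles the *T
// holders so the &s below does not allocate either.
type slicePool[T any] struct{ p sync.Pool }

func (sp *slicePool[T]) Get() []T {
	if v, ok := sp.p.Get().(*[]T); ok {
		return *v
	}
	return nil // empty pool: the caller allocates, mirroring getFPointSlice above
}

func (sp *slicePool[T]) Put(s []T) {
	sp.p.Put(&s)
}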
@@ -1799,23 +1950,27 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storag ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } + var chkIter chunkenc.Iterator series := vs.Series for i, s := range series { if err := contextDone(ev.ctx, "expression evaluation"); err != nil { ev.error(err) } - it.Reset(s.Iterator()) + chkIter = s.Iterator(chkIter) + it.Reset(chkIter) ss := Series{ Metric: series[i].Labels(), } - ss.Points = ev.matrixIterSlice(it, mint, maxt, getPointSlice(16)) - ev.samplesStats.IncrementSamplesAtTimestamp(ev.startTimestamp, int64(len(ss.Points))) + ss.Floats, ss.Histograms = ev.matrixIterSlice(it, mint, maxt, nil, nil) + totalLen := int64(len(ss.Floats)) + int64(len(ss.Histograms)) + ev.samplesStats.IncrementSamplesAtTimestamp(ev.startTimestamp, totalLen) - if len(ss.Points) > 0 { + if totalLen > 0 { matrix = append(matrix, ss) } else { - putPointSlice(ss.Points) + putFPointSlice(ss.Floats) + putHPointSlice(ss.Histograms) } } return matrix, ws @@ -1829,61 +1984,132 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storag // values). Any such points falling before mint are discarded; points that fall // into the [mint, maxt] range are retained; only points with later timestamps // are populated from the iterator. -func (ev *evaluator) matrixIterSlice(it *storage.BufferedSeriesIterator, mint, maxt int64, out []Point) []Point { - if len(out) > 0 && out[len(out)-1].T >= mint { +func (ev *evaluator) matrixIterSlice( + it *storage.BufferedSeriesIterator, mint, maxt int64, + floats []FPoint, histograms []HPoint, +) ([]FPoint, []HPoint) { + mintFloats, mintHistograms := mint, mint + + // First floats... + if len(floats) > 0 && floats[len(floats)-1].T >= mint { // There is an overlap between previous and current ranges, retain common // points. In most such cases: // (a) the overlap is significantly larger than the eval step; and/or // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; out[drop].T < mint; drop++ { + for drop = 0; floats[drop].T < mint; drop++ { // nolint:revive } ev.currentSamples -= drop - copy(out, out[drop:]) - out = out[:len(out)-drop] + copy(floats, floats[drop:]) + floats = floats[:len(floats)-drop] // Only append points with timestamps after the last timestamp we have. - mint = out[len(out)-1].T + 1 + mintFloats = floats[len(floats)-1].T + 1 } else { - ev.currentSamples -= len(out) - out = out[:0] + ev.currentSamples -= len(floats) + if floats != nil { + floats = floats[:0] + } } - ok := it.Seek(maxt) - if !ok { + // ...then the same for histograms. TODO(beorn7): Use generics? + if len(histograms) > 0 && histograms[len(histograms)-1].T >= mint { + // There is an overlap between previous and current ranges, retain common + // points. In most such cases: + // (a) the overlap is significantly larger than the eval step; and/or + // (b) the number of samples is relatively small. + // so a linear search will be as fast as a binary search. + var drop int + for drop = 0; histograms[drop].T < mint; drop++ { // nolint:revive + } + ev.currentSamples -= drop + copy(histograms, histograms[drop:]) + histograms = histograms[:len(histograms)-drop] + // Only append points with timestamps after the last timestamp we have. 
+ mintHistograms = histograms[len(histograms)-1].T + 1 + } else { + ev.currentSamples -= len(histograms) + if histograms != nil { + histograms = histograms[:0] + } + } + + soughtValueType := it.Seek(maxt) + if soughtValueType == chunkenc.ValNone { if it.Err() != nil { ev.error(it.Err()) } } buf := it.Buffer() - for buf.Next() { - t, v := buf.At() - if value.IsStaleNaN(v) { - continue - } - // Values in the buffer are guaranteed to be smaller than maxt. - if t >= mint { - if ev.currentSamples >= ev.maxSamples { - ev.error(ErrTooManySamples(env)) +loop: + for { + switch buf.Next() { + case chunkenc.ValNone: + break loop + case chunkenc.ValFloatHistogram, chunkenc.ValHistogram: + t, h := buf.AtFloatHistogram() + if value.IsStaleNaN(h.Sum) { + continue loop + } + // Values in the buffer are guaranteed to be smaller than maxt. + if t >= mintHistograms { + if ev.currentSamples >= ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + ev.currentSamples++ + if histograms == nil { + histograms = getHPointSlice(16) + } + histograms = append(histograms, HPoint{T: t, H: h}) + } + case chunkenc.ValFloat: + t, f := buf.At() + if value.IsStaleNaN(f) { + continue loop + } + // Values in the buffer are guaranteed to be smaller than maxt. + if t >= mintFloats { + if ev.currentSamples >= ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + ev.currentSamples++ + if floats == nil { + floats = getFPointSlice(16) + } + floats = append(floats, FPoint{T: t, F: f}) } - ev.currentSamples++ - out = append(out, Point{T: t, V: v}) } } - // The seeked sample might also be in the range. - if ok { - t, v := it.At() - if t == maxt && !value.IsStaleNaN(v) { + // The sought sample might also be in the range. + switch soughtValueType { + case chunkenc.ValFloatHistogram, chunkenc.ValHistogram: + t, h := it.AtFloatHistogram() + if t == maxt && !value.IsStaleNaN(h.Sum) { if ev.currentSamples >= ev.maxSamples { ev.error(ErrTooManySamples(env)) } - out = append(out, Point{T: t, V: v}) + if histograms == nil { + histograms = getHPointSlice(16) + } + histograms = append(histograms, HPoint{T: t, H: h}) + ev.currentSamples++ + } + case chunkenc.ValFloat: + t, f := it.At() + if t == maxt && !value.IsStaleNaN(f) { + if ev.currentSamples >= ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + if floats == nil { + floats = getFPointSlice(16) + } + floats = append(floats, FPoint{T: t, F: f}) ev.currentSamples++ } } ev.samplesStats.UpdatePeak(ev.currentSamples) - return out + return floats, histograms } func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector { @@ -1911,13 +2137,13 @@ func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching, } func (ev *evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector { - if matching.Card != parser.CardManyToMany { + switch { + case matching.Card != parser.CardManyToMany: panic("set operations must only use many-to-many matching") - } - if len(lhs) == 0 { // Short-circuit. + case len(lhs) == 0: // Short-circuit. enh.Out = append(enh.Out, rhs...) return enh.Out - } else if len(rhs) == 0 { + case len(rhs) == 0: enh.Out = append(enh.Out, lhs...) return enh.Out } @@ -2029,18 +2255,21 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * } // Account for potentially swapped sidedness. 
- vl, vr := ls.V, rs.V + fl, fr := ls.F, rs.F + hl, hr := ls.H, rs.H if matching.Card == parser.CardOneToMany { - vl, vr = vr, vl + fl, fr = fr, fl + hl, hr = hr, hl } - value, keep := vectorElemBinop(op, vl, vr) - if returnBool { + floatValue, histogramValue, keep := vectorElemBinop(op, fl, fr, hl, hr) + switch { + case returnBool: if keep { - value = 1.0 + floatValue = 1.0 } else { - value = 0.0 + floatValue = 0.0 } - } else if !keep { + case !keep: continue } metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh) @@ -2070,7 +2299,8 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * enh.Out = append(enh.Out, Sample{ Metric: metric, - Point: Point{V: value}, + F: floatValue, + H: histogramValue, }) } return enh.Out @@ -2097,12 +2327,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V enh.resultMetric = make(map[string]labels.Labels, len(enh.Out)) } - if enh.lb == nil { - enh.lb = labels.NewBuilder(lhs) - } else { - enh.lb.Reset(lhs) - } - + enh.resetBuilder(lhs) buf := bytes.NewBuffer(enh.lblResultBuf[:0]) enh.lblBuf = lhs.Bytes(enh.lblBuf) buf.Write(enh.lblBuf) @@ -2135,7 +2360,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V } } - ret := enh.lb.Labels(nil) + ret := enh.lb.Labels() enh.resultMetric[str] = ret return ret } @@ -2143,28 +2368,33 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V // VectorscalarBinop evaluates a binary operation between a Vector and a Scalar. func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) Vector { for _, lhsSample := range lhs { - lv, rv := lhsSample.V, rhs.V + lf, rf := lhsSample.F, rhs.V + var rh *histogram.FloatHistogram + lh := lhsSample.H // lhs always contains the Vector. If the original position was different // swap for calculating the value. if swap { - lv, rv = rv, lv + lf, rf = rf, lf + lh, rh = rh, lh } - value, keep := vectorElemBinop(op, lv, rv) + float, histogram, keep := vectorElemBinop(op, lf, rf, lh, rh) // Catch cases where the scalar is the LHS in a scalar-vector comparison operation. // We want to always keep the vector element value as the output value, even if it's on the RHS. if op.IsComparisonOperator() && swap { - value = rv + float = rf + histogram = rh } if returnBool { if keep { - value = 1.0 + float = 1.0 } else { - value = 0.0 + float = 0.0 } keep = true } if keep { - lhsSample.V = value + lhsSample.F = float + lhsSample.H = histogram if shouldDropMetricName(op) || returnBool { lhsSample.Metric = enh.DropMetricName(lhsSample.Metric) } @@ -2175,7 +2405,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala } func dropMetricName(l labels.Labels) labels.Labels { - return labels.NewBuilder(l).Del(labels.MetricName).Labels(nil) + return labels.NewBuilder(l).Del(labels.MetricName).Labels() } // scalarBinop evaluates a binary operation between two Scalars. @@ -2212,45 +2442,74 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 { } // vectorElemBinop evaluates a binary operation between two Vector elements. 
-func vectorElemBinop(op parser.ItemType, lhs, rhs float64) (float64, bool) { +func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool) { switch op { case parser.ADD: - return lhs + rhs, true + if hlhs != nil && hrhs != nil { + // The histogram being added must have the larger schema + // code (i.e. the higher resolution). + if hrhs.Schema >= hlhs.Schema { + return 0, hlhs.Copy().Add(hrhs).Compact(0), true + } + return 0, hrhs.Copy().Add(hlhs).Compact(0), true + } + return lhs + rhs, nil, true case parser.SUB: - return lhs - rhs, true + if hlhs != nil && hrhs != nil { + // The histogram being subtracted must have the larger schema + // code (i.e. the higher resolution). + if hrhs.Schema >= hlhs.Schema { + return 0, hlhs.Copy().Sub(hrhs).Compact(0), true + } + return 0, hrhs.Copy().Mul(-1).Add(hlhs).Compact(0), true + } + return lhs - rhs, nil, true case parser.MUL: - return lhs * rhs, true + if hlhs != nil && hrhs == nil { + return 0, hlhs.Copy().Mul(rhs), true + } + if hlhs == nil && hrhs != nil { + return 0, hrhs.Copy().Mul(lhs), true + } + return lhs * rhs, nil, true case parser.DIV: - return lhs / rhs, true + if hlhs != nil && hrhs == nil { + return 0, hlhs.Copy().Div(rhs), true + } + return lhs / rhs, nil, true case parser.POW: - return math.Pow(lhs, rhs), true + return math.Pow(lhs, rhs), nil, true case parser.MOD: - return math.Mod(lhs, rhs), true + return math.Mod(lhs, rhs), nil, true case parser.EQLC: - return lhs, lhs == rhs + return lhs, nil, lhs == rhs case parser.NEQ: - return lhs, lhs != rhs + return lhs, nil, lhs != rhs case parser.GTR: - return lhs, lhs > rhs + return lhs, nil, lhs > rhs case parser.LSS: - return lhs, lhs < rhs + return lhs, nil, lhs < rhs case parser.GTE: - return lhs, lhs >= rhs + return lhs, nil, lhs >= rhs case parser.LTE: - return lhs, lhs <= rhs + return lhs, nil, lhs <= rhs case parser.ATAN2: - return math.Atan2(lhs, rhs), true + return math.Atan2(lhs, rhs), nil, true } panic(fmt.Errorf("operator %q not allowed for operations between Vectors", op)) } type groupedAggregation struct { - labels labels.Labels - value float64 - mean float64 - groupCount int - heap vectorByValueHeap - reverseHeap vectorByReverseValueHeap + hasFloat bool // Has at least 1 float64 sample aggregated. + hasHistogram bool // Has at least 1 histogram sample aggregated. + labels labels.Labels + floatValue float64 + histogramValue *histogram.FloatHistogram + floatMean float64 + histogramMean *histogram.FloatHistogram + groupCount int + heap vectorByValueHeap + reverseHeap vectorByReverseValueHeap } // aggregation evaluates an aggregation operation on a Vector. The provided grouping labels @@ -2291,15 +2550,14 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without } } - lb := labels.NewBuilder(nil) var buf []byte for si, s := range vec { metric := s.Metric if op == parser.COUNT_VALUES { - lb.Reset(metric) - lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64)) - metric = lb.Labels(nil) + enh.resetBuilder(metric) + enh.lb.Set(valueLabel, strconv.FormatFloat(s.F, 'f', -1, 64)) + metric = enh.lb.Labels() // We've changed the metric so we have to recompute the grouping key. recomputeGroupingKey = true @@ -2316,134 +2574,194 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without group, ok := result[groupingKey] // Add a new group if it doesn't exist. if !ok { - lb.Reset(metric) - if without { - lb.Del(grouping...) 
- lb.Del(labels.MetricName) - } else { - lb.Keep(grouping...) + var m labels.Labels + enh.resetBuilder(metric) + switch { + case without: + enh.lb.Del(grouping...) + enh.lb.Del(labels.MetricName) + m = enh.lb.Labels() + case len(grouping) > 0: + enh.lb.Keep(grouping...) + m = enh.lb.Labels() + default: + m = labels.EmptyLabels() } - m := lb.Labels(nil) newAgg := &groupedAggregation{ labels: m, - value: s.V, - mean: s.V, + floatValue: s.F, + floatMean: s.F, groupCount: 1, } + switch { + case s.H == nil: + newAgg.hasFloat = true + case op == parser.SUM: + newAgg.histogramValue = s.H.Copy() + newAgg.hasHistogram = true + case op == parser.AVG: + newAgg.histogramMean = s.H.Copy() + newAgg.hasHistogram = true + case op == parser.STDVAR || op == parser.STDDEV: + newAgg.groupCount = 0 + } result[groupingKey] = newAgg orderedResult = append(orderedResult, newAgg) inputVecLen := int64(len(vec)) resultSize := k - if k > inputVecLen { + switch { + case k > inputVecLen: resultSize = inputVecLen - } else if k == 0 { + case k == 0: resultSize = 1 } switch op { case parser.STDVAR, parser.STDDEV: - result[groupingKey].value = 0 + result[groupingKey].floatValue = 0 case parser.TOPK, parser.QUANTILE: result[groupingKey].heap = make(vectorByValueHeap, 1, resultSize) result[groupingKey].heap[0] = Sample{ - Point: Point{V: s.V}, + F: s.F, Metric: s.Metric, } case parser.BOTTOMK: result[groupingKey].reverseHeap = make(vectorByReverseValueHeap, 1, resultSize) result[groupingKey].reverseHeap[0] = Sample{ - Point: Point{V: s.V}, + F: s.F, Metric: s.Metric, } case parser.GROUP: - result[groupingKey].value = 1 + result[groupingKey].floatValue = 1 } continue } switch op { case parser.SUM: - group.value += s.V + if s.H != nil { + group.hasHistogram = true + if group.histogramValue != nil { + // The histogram being added must have + // an equal or larger schema. + if s.H.Schema >= group.histogramValue.Schema { + group.histogramValue.Add(s.H) + } else { + group.histogramValue = s.H.Copy().Add(group.histogramValue) + } + } + // Otherwise the aggregation contained floats + // previously and will be invalid anyway. No + // point in copying the histogram in that case. + } else { + group.hasFloat = true + group.floatValue += s.F + } case parser.AVG: group.groupCount++ - if math.IsInf(group.mean, 0) { - if math.IsInf(s.V, 0) && (group.mean > 0) == (s.V > 0) { - // The `mean` and `s.V` values are `Inf` of the same sign. They - // can't be subtracted, but the value of `mean` is correct - // already. - break + if s.H != nil { + group.hasHistogram = true + if group.histogramMean != nil { + left := s.H.Copy().Div(float64(group.groupCount)) + right := group.histogramMean.Copy().Div(float64(group.groupCount)) + // The histogram being added/subtracted must have + // an equal or larger schema. + if s.H.Schema >= group.histogramMean.Schema { + toAdd := right.Mul(-1).Add(left) + group.histogramMean.Add(toAdd) + } else { + toAdd := left.Sub(right) + group.histogramMean = toAdd.Add(group.histogramMean) + } } - if !math.IsInf(s.V, 0) && !math.IsNaN(s.V) { - // At this stage, the mean is an infinite. If the added - // value is neither an Inf or a Nan, we can keep that mean - // value. - // This is required because our calculation below removes - // the mean value, which would look like Inf += x - Inf and - // end up as a NaN. - break + // Otherwise the aggregation contained floats + // previously and will be invalid anyway. No + // point in copying the histogram in that case. 
+ } else { + group.hasFloat = true + if math.IsInf(group.floatMean, 0) { + if math.IsInf(s.F, 0) && (group.floatMean > 0) == (s.F > 0) { + // The `floatMean` and `s.F` values are `Inf` of the same sign. They + // can't be subtracted, but the value of `floatMean` is correct + // already. + break + } + if !math.IsInf(s.F, 0) && !math.IsNaN(s.F) { + // At this stage, the mean is an infinite. If the added + // value is neither an Inf or a Nan, we can keep that mean + // value. + // This is required because our calculation below removes + // the mean value, which would look like Inf += x - Inf and + // end up as a NaN. + break + } } + // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. + group.floatMean += s.F/float64(group.groupCount) - group.floatMean/float64(group.groupCount) } - // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. - group.mean += s.V/float64(group.groupCount) - group.mean/float64(group.groupCount) case parser.GROUP: // Do nothing. Required to avoid the panic in `default:` below. case parser.MAX: - if group.value < s.V || math.IsNaN(group.value) { - group.value = s.V + if group.floatValue < s.F || math.IsNaN(group.floatValue) { + group.floatValue = s.F } case parser.MIN: - if group.value > s.V || math.IsNaN(group.value) { - group.value = s.V + if group.floatValue > s.F || math.IsNaN(group.floatValue) { + group.floatValue = s.F } case parser.COUNT, parser.COUNT_VALUES: group.groupCount++ case parser.STDVAR, parser.STDDEV: - group.groupCount++ - delta := s.V - group.mean - group.mean += delta / float64(group.groupCount) - group.value += delta * (s.V - group.mean) + if s.H == nil { // Ignore native histograms. + group.groupCount++ + delta := s.F - group.floatMean + group.floatMean += delta / float64(group.groupCount) + group.floatValue += delta * (s.F - group.floatMean) + } case parser.TOPK: - if int64(len(group.heap)) < k || group.heap[0].V < s.V || math.IsNaN(group.heap[0].V) { - if int64(len(group.heap)) == k { - if k == 1 { // For k==1 we can replace in-situ. - group.heap[0] = Sample{ - Point: Point{V: s.V}, - Metric: s.Metric, - } - break - } - heap.Pop(&group.heap) - } + // We build a heap of up to k elements, with the smallest element at heap[0]. + switch { + case int64(len(group.heap)) < k: heap.Push(&group.heap, &Sample{ - Point: Point{V: s.V}, + F: s.F, Metric: s.Metric, }) + case group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)): + // This new element is bigger than the previous smallest element - overwrite that. + group.heap[0] = Sample{ + F: s.F, + Metric: s.Metric, + } + if k > 1 { + heap.Fix(&group.heap, 0) // Maintain the heap invariant. + } } case parser.BOTTOMK: - if int64(len(group.reverseHeap)) < k || group.reverseHeap[0].V > s.V || math.IsNaN(group.reverseHeap[0].V) { - if int64(len(group.reverseHeap)) == k { - if k == 1 { // For k==1 we can replace in-situ. - group.reverseHeap[0] = Sample{ - Point: Point{V: s.V}, - Metric: s.Metric, - } - break - } - heap.Pop(&group.reverseHeap) - } + // We build a heap of up to k elements, with the biggest element at heap[0]. + switch { + case int64(len(group.reverseHeap)) < k: heap.Push(&group.reverseHeap, &Sample{ - Point: Point{V: s.V}, + F: s.F, Metric: s.Metric, }) + case group.reverseHeap[0].F > s.F || (math.IsNaN(group.reverseHeap[0].F) && !math.IsNaN(s.F)): + // This new element is smaller than the previous biggest element - overwrite that. 
+ group.reverseHeap[0] = Sample{ + F: s.F, + Metric: s.Metric, + } + if k > 1 { + heap.Fix(&group.reverseHeap, 0) // Maintain the heap invariant. + } } case parser.QUANTILE: @@ -2458,16 +2776,25 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without for _, aggr := range orderedResult { switch op { case parser.AVG: - aggr.value = aggr.mean + if aggr.hasFloat && aggr.hasHistogram { + // We cannot aggregate histogram sample with a float64 sample. + // TODO(zenador): Issue warning when plumbing is in place. + continue + } + if aggr.hasHistogram { + aggr.histogramValue = aggr.histogramMean.Compact(0) + } else { + aggr.floatValue = aggr.floatMean + } case parser.COUNT, parser.COUNT_VALUES: - aggr.value = float64(aggr.groupCount) + aggr.floatValue = float64(aggr.groupCount) case parser.STDVAR: - aggr.value = aggr.value / float64(aggr.groupCount) + aggr.floatValue /= float64(aggr.groupCount) case parser.STDDEV: - aggr.value = math.Sqrt(aggr.value / float64(aggr.groupCount)) + aggr.floatValue = math.Sqrt(aggr.floatValue / float64(aggr.groupCount)) case parser.TOPK: // The heap keeps the lowest value on top, so reverse it. @@ -2477,7 +2804,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without for _, v := range aggr.heap { enh.Out = append(enh.Out, Sample{ Metric: v.Metric, - Point: Point{V: v.V}, + F: v.F, }) } continue // Bypass default append. @@ -2490,21 +2817,31 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without for _, v := range aggr.reverseHeap { enh.Out = append(enh.Out, Sample{ Metric: v.Metric, - Point: Point{V: v.V}, + F: v.F, }) } continue // Bypass default append. case parser.QUANTILE: - aggr.value = quantile(q, aggr.heap) + aggr.floatValue = quantile(q, aggr.heap) + case parser.SUM: + if aggr.hasFloat && aggr.hasHistogram { + // We cannot aggregate histogram sample with a float64 sample. + // TODO(zenador): Issue warning when plumbing is in place. + continue + } + if aggr.hasHistogram { + aggr.histogramValue.Compact(0) + } default: // For other aggregations, we already have the right value. 
} enh.Out = append(enh.Out, Sample{ Metric: aggr.labels, - Point: Point{V: aggr.value}, + F: aggr.floatValue, + H: aggr.histogramValue, }) } return enh.Out @@ -2588,9 +2925,10 @@ func PreprocessExpr(expr parser.Expr, start, end time.Time) parser.Expr { func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool { switch n := expr.(type) { case *parser.VectorSelector: - if n.StartOrEnd == parser.START { + switch n.StartOrEnd { + case parser.START: n.Timestamp = makeInt64Pointer(timestamp.FromTime(start)) - } else if n.StartOrEnd == parser.END { + case parser.END: n.Timestamp = makeInt64Pointer(timestamp.FromTime(end)) } return n.Timestamp != nil @@ -2647,9 +2985,10 @@ func preprocessExprHelper(expr parser.Expr, start, end time.Time) bool { if isInvariant { n.Expr = newStepInvariantExpr(n.Expr) } - if n.StartOrEnd == parser.START { + switch n.StartOrEnd { + case parser.START: n.Timestamp = makeInt64Pointer(timestamp.FromTime(start)) - } else if n.StartOrEnd == parser.END { + case parser.END: n.Timestamp = makeInt64Pointer(timestamp.FromTime(end)) } return n.Timestamp != nil diff --git a/promql/engine_test.go b/promql/engine_test.go index 6a75ccb282..5ffebc202d 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "math" "os" "sort" "testing" @@ -24,15 +25,17 @@ import ( "github.com/go-kit/log" - "github.com/prometheus/prometheus/util/stats" + "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/stretchr/testify/require" "go.uber.org/goleak" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/stats" ) func TestMain(m *testing.M) { @@ -228,14 +231,14 @@ func TestQueryError(t *testing.T) { ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() - vectorQuery, err := engine.NewInstantQuery(queryable, nil, "foo", time.Unix(1, 0)) + vectorQuery, err := engine.NewInstantQuery(ctx, queryable, nil, "foo", time.Unix(1, 0)) require.NoError(t, err) res := vectorQuery.Exec(ctx) require.Error(t, res.Err, "expected error on failed select but got none") require.True(t, errors.Is(res.Err, errStorage), "expected error doesn't match") - matrixQuery, err := engine.NewInstantQuery(queryable, nil, "foo[1m]", time.Unix(1, 0)) + matrixQuery, err := engine.NewInstantQuery(ctx, queryable, nil, "foo[1m]", time.Unix(1, 0)) require.NoError(t, err) res = matrixQuery.Exec(ctx) @@ -561,14 +564,15 @@ func TestSelectHintsSetCorrectly(t *testing.T) { query Query err error ) + ctx := context.Background() if tc.end == 0 { - query, err = engine.NewInstantQuery(hintsRecorder, nil, tc.query, timestamp.Time(tc.start)) + query, err = engine.NewInstantQuery(ctx, hintsRecorder, nil, tc.query, timestamp.Time(tc.start)) } else { - query, err = engine.NewRangeQuery(hintsRecorder, nil, tc.query, timestamp.Time(tc.start), timestamp.Time(tc.end), time.Second) + query, err = engine.NewRangeQuery(ctx, hintsRecorder, nil, tc.query, timestamp.Time(tc.start), timestamp.Time(tc.end), time.Second) } require.NoError(t, err) - res := query.Exec(context.Background()) + res := query.Exec(ctx) require.NoError(t, res.Err) require.Equal(t, tc.expected, hintsRecorder.hints) @@ -659,7 +663,8 @@ load 10s Query: "metric", Result: Vector{ Sample{ - Point: Point{V: 1, T: 1000}, + F: 1, + T: 1000, Metric: 
labels.FromStrings("__name__", "metric"), }, }, @@ -669,7 +674,7 @@ load 10s Query: "metric[20s]", Result: Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 2, T: 10000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -680,7 +685,8 @@ load 10s Query: "1", Result: Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 1000}, {V: 1, T: 2000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 1000}, {F: 1, T: 2000}}, + Metric: labels.EmptyLabels(), }, }, Start: time.Unix(0, 0), @@ -691,7 +697,7 @@ load 10s Query: "metric", Result: Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 1000}, {V: 1, T: 2000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 1000}, {F: 1, T: 2000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -703,7 +709,7 @@ load 10s Query: "metric", Result: Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -722,9 +728,9 @@ load 10s var err error var qry Query if c.Interval == 0 { - qry, err = test.QueryEngine().NewInstantQuery(test.Queryable(), nil, c.Query, c.Start) + qry, err = test.QueryEngine().NewInstantQuery(test.context, test.Queryable(), nil, c.Query, c.Start) } else { - qry, err = test.QueryEngine().NewRangeQuery(test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval) + qry, err = test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval) } require.NoError(t, err) @@ -1192,16 +1198,16 @@ load 10s origMaxSamples := engine.maxSamplesPerQuery for _, c := range cases { t.Run(c.Query, func(t *testing.T) { - opts := &QueryOpts{EnablePerStepStats: true} + opts := NewPrometheusQueryOpts(true, 0) engine.maxSamplesPerQuery = origMaxSamples runQuery := func(expErr error) *stats.Statistics { var err error var qry Query if c.Interval == 0 { - qry, err = engine.NewInstantQuery(test.Queryable(), opts, c.Query, c.Start) + qry, err = engine.NewInstantQuery(test.context, test.Queryable(), opts, c.Query, c.Start) } else { - qry, err = engine.NewRangeQuery(test.Queryable(), opts, c.Query, c.Start, c.End, c.Interval) + qry, err = engine.NewRangeQuery(test.context, test.Queryable(), opts, c.Query, c.Start, c.End, c.Interval) } require.NoError(t, err) @@ -1382,9 +1388,9 @@ load 10s var err error var qry Query if c.Interval == 0 { - qry, err = engine.NewInstantQuery(test.Queryable(), nil, c.Query, c.Start) + qry, err = engine.NewInstantQuery(test.context, test.Queryable(), nil, c.Query, c.Start) } else { - qry, err = engine.NewRangeQuery(test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval) + qry, err = engine.NewRangeQuery(test.context, test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval) } require.NoError(t, err) @@ -1458,20 +1464,20 @@ load 1ms query: `metric_neg @ 0`, start: 100, result: Vector{ - Sample{Point: Point{V: 1, T: 100000}, Metric: lblsneg}, + Sample{F: 1, T: 100000, Metric: lblsneg}, }, }, { query: `metric_neg @ -200`, start: 100, result: Vector{ - Sample{Point: Point{V: 201, T: 100000}, Metric: lblsneg}, + Sample{F: 201, T: 100000, Metric: lblsneg}, }, }, { query: `metric{job="2"} @ 50`, start: -2, end: 2, interval: 1, result: Matrix{ Series{ - Points: []Point{{V: 10, T: -2000}, {V: 10, T: -1000}, {V: 10, T: 0}, {V: 10, T: 1000}, {V: 10, T: 2000}}, + Floats: []FPoint{{F: 10, T: -2000}, {F: 10, T: -1000}, {F: 10, T: 0}, {F: 10, T: 1000}, {F: 10, T: 2000}}, Metric: lbls2, }, }, 
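The remaining engine_test.go edits are the same mechanical rewrite of expected values; under the new model the literals take this shape (values are placeholders mirroring the cases above):

package main

import (
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/promql"
)

// Expected results now name their value fields explicitly: F and T on
// Sample for instant vectors, Floats ([]FPoint) on Series for matrices.
var (
	wantVector = promql.Vector{
		{T: 1000, F: 1, Metric: labels.FromStrings("__name__", "metric")},
	}
	wantMatrix = promql.Matrix{
		{
			Metric: labels.FromStrings("__name__", "metric"),
			Floats: []promql.FPoint{{T: 0, F: 1}, {T: 10000, F: 2}},
		},
	}
)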
@@ -1480,11 +1486,11 @@ load 1ms start: 10, result: Matrix{ Series{ - Points: []Point{{V: 28, T: 280000}, {V: 29, T: 290000}, {V: 30, T: 300000}}, + Floats: []FPoint{{F: 28, T: 280000}, {F: 29, T: 290000}, {F: 30, T: 300000}}, Metric: lbls1, }, Series{ - Points: []Point{{V: 56, T: 280000}, {V: 58, T: 290000}, {V: 60, T: 300000}}, + Floats: []FPoint{{F: 56, T: 280000}, {F: 58, T: 290000}, {F: 60, T: 300000}}, Metric: lbls2, }, }, @@ -1493,7 +1499,7 @@ load 1ms start: 100, result: Matrix{ Series{ - Points: []Point{{V: 3, T: -2000}, {V: 2, T: -1000}, {V: 1, T: 0}}, + Floats: []FPoint{{F: 3, T: -2000}, {F: 2, T: -1000}, {F: 1, T: 0}}, Metric: lblsneg, }, }, @@ -1502,7 +1508,7 @@ load 1ms start: 100, result: Matrix{ Series{ - Points: []Point{{V: 504, T: -503000}, {V: 503, T: -502000}, {V: 502, T: -501000}, {V: 501, T: -500000}}, + Floats: []FPoint{{F: 504, T: -503000}, {F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}}, Metric: lblsneg, }, }, @@ -1511,7 +1517,7 @@ load 1ms start: 100, result: Matrix{ Series{ - Points: []Point{{V: 2342, T: 2342}, {V: 2343, T: 2343}, {V: 2344, T: 2344}, {V: 2345, T: 2345}}, + Floats: []FPoint{{F: 2342, T: 2342}, {F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}}, Metric: lblsms, }, }, @@ -1520,11 +1526,11 @@ load 1ms start: 100, result: Matrix{ Series{ - Points: []Point{{V: 20, T: 200000}, {V: 22, T: 225000}, {V: 25, T: 250000}, {V: 27, T: 275000}, {V: 30, T: 300000}}, + Floats: []FPoint{{F: 20, T: 200000}, {F: 22, T: 225000}, {F: 25, T: 250000}, {F: 27, T: 275000}, {F: 30, T: 300000}}, Metric: lbls1, }, Series{ - Points: []Point{{V: 40, T: 200000}, {V: 44, T: 225000}, {V: 50, T: 250000}, {V: 54, T: 275000}, {V: 60, T: 300000}}, + Floats: []FPoint{{F: 40, T: 200000}, {F: 44, T: 225000}, {F: 50, T: 250000}, {F: 54, T: 275000}, {F: 60, T: 300000}}, Metric: lbls2, }, }, @@ -1533,7 +1539,7 @@ load 1ms start: 100, result: Matrix{ Series{ - Points: []Point{{V: 51, T: -50000}, {V: 26, T: -25000}, {V: 1, T: 0}}, + Floats: []FPoint{{F: 51, T: -50000}, {F: 26, T: -25000}, {F: 1, T: 0}}, Metric: lblsneg, }, }, @@ -1542,7 +1548,7 @@ load 1ms start: 100, result: Matrix{ Series{ - Points: []Point{{V: 151, T: -150000}, {V: 126, T: -125000}, {V: 101, T: -100000}}, + Floats: []FPoint{{F: 151, T: -150000}, {F: 126, T: -125000}, {F: 101, T: -100000}}, Metric: lblsneg, }, }, @@ -1551,7 +1557,7 @@ load 1ms start: 100, result: Matrix{ Series{ - Points: []Point{{V: 2250, T: 2250}, {V: 2275, T: 2275}, {V: 2300, T: 2300}, {V: 2325, T: 2325}}, + Floats: []FPoint{{F: 2250, T: 2250}, {F: 2275, T: 2275}, {F: 2300, T: 2300}, {F: 2325, T: 2325}}, Metric: lblsms, }, }, @@ -1560,7 +1566,7 @@ load 1ms start: 50, end: 80, interval: 10, result: Matrix{ Series{ - Points: []Point{{V: 995, T: 50000}, {V: 994, T: 60000}, {V: 993, T: 70000}, {V: 992, T: 80000}}, + Floats: []FPoint{{F: 995, T: 50000}, {F: 994, T: 60000}, {F: 993, T: 70000}, {F: 992, T: 80000}}, Metric: lblstopk3, }, }, @@ -1569,7 +1575,7 @@ load 1ms start: 50, end: 80, interval: 10, result: Matrix{ Series{ - Points: []Point{{V: 10, T: 50000}, {V: 12, T: 60000}, {V: 14, T: 70000}, {V: 16, T: 80000}}, + Floats: []FPoint{{F: 10, T: 50000}, {F: 12, T: 60000}, {F: 14, T: 70000}, {F: 16, T: 80000}}, Metric: lblstopk2, }, }, @@ -1578,7 +1584,7 @@ load 1ms start: 70, end: 100, interval: 10, result: Matrix{ Series{ - Points: []Point{{V: 993, T: 70000}, {V: 992, T: 80000}, {V: 991, T: 90000}, {V: 990, T: 100000}}, + Floats: []FPoint{{F: 993, T: 70000}, {F: 992, T: 80000}, {F: 991, T: 90000}, {F: 990, T: 100000}}, Metric: 
lblstopk3, }, }, @@ -1587,7 +1593,7 @@ load 1ms start: 100, end: 130, interval: 10, result: Matrix{ Series{ - Points: []Point{{V: 990, T: 100000}, {V: 989, T: 110000}, {V: 988, T: 120000}, {V: 987, T: 130000}}, + Floats: []FPoint{{F: 990, T: 100000}, {F: 989, T: 110000}, {F: 988, T: 120000}, {F: 987, T: 130000}}, Metric: lblstopk3, }, }, @@ -1598,15 +1604,15 @@ load 1ms start: 0, end: 7 * 60, interval: 60, result: Matrix{ Series{ - Points: []Point{ - {V: 3600, T: 0}, - {V: 3600, T: 60 * 1000}, - {V: 3600, T: 2 * 60 * 1000}, - {V: 3600, T: 3 * 60 * 1000}, - {V: 3600, T: 4 * 60 * 1000}, - {V: 3600, T: 5 * 60 * 1000}, - {V: 3600, T: 6 * 60 * 1000}, - {V: 3600, T: 7 * 60 * 1000}, + Floats: []FPoint{ + {F: 3600, T: 0}, + {F: 3600, T: 60 * 1000}, + {F: 3600, T: 2 * 60 * 1000}, + {F: 3600, T: 3 * 60 * 1000}, + {F: 3600, T: 4 * 60 * 1000}, + {F: 3600, T: 5 * 60 * 1000}, + {F: 3600, T: 6 * 60 * 1000}, + {F: 3600, T: 7 * 60 * 1000}, }, Metric: labels.EmptyLabels(), }, @@ -1623,9 +1629,9 @@ load 1ms var err error var qry Query if c.end == 0 { - qry, err = test.QueryEngine().NewInstantQuery(test.Queryable(), nil, c.query, start) + qry, err = test.QueryEngine().NewInstantQuery(test.context, test.Queryable(), nil, c.query, start) } else { - qry, err = test.QueryEngine().NewRangeQuery(test.Queryable(), nil, c.query, start, end, interval) + qry, err = test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, c.query, start, end, interval) } require.NoError(t, err) @@ -1719,7 +1725,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 2, T: 10000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1733,7 +1739,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1747,7 +1753,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1761,7 +1767,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 1, T: 5000}, {V: 2, T: 10000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1775,7 +1781,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}, {V: 2, T: 30000}}, + Floats: []FPoint{{F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1789,7 +1795,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}, {V: 2, T: 30000}}, + Floats: []FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1803,7 +1809,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}}, + Floats: []FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}}, Metric: labels.FromStrings("__name__", 
"metric"), }, }, @@ -1817,7 +1823,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 2, T: 10000}, {V: 2, T: 15000}, {V: 2, T: 20000}, {V: 2, T: 25000}}, + Floats: []FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1840,7 +1846,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 9990, T: 9990000}, {V: 10000, T: 10000000}, {V: 100, T: 10010000}, {V: 130, T: 10020000}}, + Floats: []FPoint{{F: 9990, T: 9990000}, {F: 10000, T: 10000000}, {F: 100, T: 10010000}, {F: 130, T: 10020000}}, Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), }, }, @@ -1854,7 +1860,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 9840, T: 9840000}, {V: 9900, T: 9900000}, {V: 9960, T: 9960000}, {V: 130, T: 10020000}, {V: 310, T: 10080000}}, + Floats: []FPoint{{F: 9840, T: 9840000}, {F: 9900, T: 9900000}, {F: 9960, T: 9960000}, {F: 130, T: 10020000}, {F: 310, T: 10080000}}, Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), }, }, @@ -1868,7 +1874,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 8640, T: 8640000}, {V: 8700, T: 8700000}, {V: 8760, T: 8760000}, {V: 8820, T: 8820000}, {V: 8880, T: 8880000}}, + Floats: []FPoint{{F: 8640, T: 8640000}, {F: 8700, T: 8700000}, {F: 8760, T: 8760000}, {F: 8820, T: 8820000}, {F: 8880, T: 8880000}}, Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), }, }, @@ -1882,19 +1888,19 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 3, T: 7985000}, {V: 3, T: 7990000}, {V: 3, T: 7995000}, {V: 3, T: 8000000}}, + Floats: []FPoint{{F: 3, T: 7985000}, {F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}}, Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"), }, Series{ - Points: []Point{{V: 4, T: 7985000}, {V: 4, T: 7990000}, {V: 4, T: 7995000}, {V: 4, T: 8000000}}, + Floats: []FPoint{{F: 4, T: 7985000}, {F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}}, Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "canary"), }, Series{ - Points: []Point{{V: 1, T: 7985000}, {V: 1, T: 7990000}, {V: 1, T: 7995000}, {V: 1, T: 8000000}}, + Floats: []FPoint{{F: 1, T: 7985000}, {F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}}, Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "production"), }, Series{ - Points: []Point{{V: 2, T: 7985000}, {V: 2, T: 7990000}, {V: 2, T: 7995000}, {V: 2, T: 8000000}}, + Floats: []FPoint{{F: 2, T: 7985000}, {F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}}, Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "production"), }, }, @@ -1908,7 +1914,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 270, T: 90000}, {V: 300, T: 100000}, {V: 330, T: 110000}, {V: 360, T: 120000}}, + Floats: []FPoint{{F: 270, T: 90000}, {F: 300, T: 100000}, {F: 330, T: 110000}, {F: 360, T: 120000}}, Metric: labels.EmptyLabels(), }, }, @@ -1922,7 +1928,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 800, T: 80000}, {V: 900, T: 90000}, {V: 1000, T: 100000}, {V: 1100, T: 110000}, {V: 1200, T: 120000}}, + Floats: 
[]FPoint{{F: 800, T: 80000}, {F: 900, T: 90000}, {F: 1000, T: 100000}, {F: 1100, T: 110000}, {F: 1200, T: 120000}}, Metric: labels.EmptyLabels(), }, }, @@ -1936,7 +1942,7 @@ func TestSubquerySelector(t *testing.T) { nil, Matrix{ Series{ - Points: []Point{{V: 1000, T: 100000}, {V: 1000, T: 105000}, {V: 1100, T: 110000}, {V: 1100, T: 115000}, {V: 1200, T: 120000}}, + Floats: []FPoint{{F: 1000, T: 100000}, {F: 1000, T: 105000}, {F: 1100, T: 110000}, {F: 1100, T: 115000}, {F: 1200, T: 120000}}, Metric: labels.EmptyLabels(), }, }, @@ -1956,7 +1962,7 @@ func TestSubquerySelector(t *testing.T) { engine := test.QueryEngine() for _, c := range tst.cases { t.Run(c.Query, func(t *testing.T) { - qry, err := engine.NewInstantQuery(test.Queryable(), nil, c.Query, c.Start) + qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, c.Query, c.Start) require.NoError(t, err) res := qry.Exec(test.Context()) @@ -2904,6 +2910,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { } func TestEngineOptsValidation(t *testing.T) { + ctx := context.Background() cases := []struct { opts EngineOpts query string @@ -2963,8 +2970,8 @@ func TestEngineOptsValidation(t *testing.T) { for _, c := range cases { eng := NewEngine(c.opts) - _, err1 := eng.NewInstantQuery(nil, nil, c.query, time.Unix(10, 0)) - _, err2 := eng.NewRangeQuery(nil, nil, c.query, time.Unix(0, 0), time.Unix(10, 0), time.Second) + _, err1 := eng.NewInstantQuery(ctx, nil, nil, c.query, time.Unix(10, 0)) + _, err2 := eng.NewRangeQuery(ctx, nil, nil, c.query, time.Unix(0, 0), time.Unix(10, 0), time.Second) if c.fail { require.Equal(t, c.expError, err1) require.Equal(t, c.expError, err2) @@ -2992,7 +2999,7 @@ func TestRangeQuery(t *testing.T) { Query: "sum_over_time(bar[30s])", Result: Matrix{ Series{ - Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}}, + Floats: []FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}}, Metric: labels.EmptyLabels(), }, }, @@ -3007,7 +3014,7 @@ func TestRangeQuery(t *testing.T) { Query: "sum_over_time(bar[30s])", Result: Matrix{ Series{ - Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}}, + Floats: []FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}}, Metric: labels.EmptyLabels(), }, }, @@ -3022,7 +3029,7 @@ func TestRangeQuery(t *testing.T) { Query: "sum_over_time(bar[30s])", Result: Matrix{ Series{ - Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}, {V: 110000, T: 180000}, {V: 11000000, T: 240000}}, + Floats: []FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}, {F: 110000, T: 180000}, {F: 11000000, T: 240000}}, Metric: labels.EmptyLabels(), }, }, @@ -3037,7 +3044,7 @@ func TestRangeQuery(t *testing.T) { Query: "sum_over_time(bar[30s])", Result: Matrix{ Series{ - Points: []Point{{V: 5, T: 0}, {V: 59, T: 60000}, {V: 9, T: 120000}, {V: 956, T: 180000}}, + Floats: []FPoint{{F: 5, T: 0}, {F: 59, T: 60000}, {F: 9, T: 120000}, {F: 956, T: 180000}}, Metric: labels.EmptyLabels(), }, }, @@ -3052,7 +3059,7 @@ func TestRangeQuery(t *testing.T) { Query: "metric", Result: Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -3067,7 +3074,7 @@ func TestRangeQuery(t *testing.T) { Query: "metric", Result: Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}}, 
Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -3083,14 +3090,14 @@ func TestRangeQuery(t *testing.T) { Query: `foo > 2 or bar`, Result: Matrix{ Series{ - Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}}, + Floats: []FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}}, Metric: labels.FromStrings( "__name__", "bar", "job", "2", ), }, Series{ - Points: []Point{{V: 3, T: 60000}, {V: 5, T: 120000}}, + Floats: []FPoint{{F: 3, T: 60000}, {F: 5, T: 120000}}, Metric: labels.FromStrings( "__name__", "foo", "job", "1", @@ -3111,7 +3118,7 @@ func TestRangeQuery(t *testing.T) { err = test.Run() require.NoError(t, err) - qry, err := test.QueryEngine().NewRangeQuery(test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval) + qry, err := test.QueryEngine().NewRangeQuery(test.context, test.Queryable(), nil, c.Query, c.Start, c.End, c.Interval) require.NoError(t, err) res := qry.Exec(test.Context()) @@ -3121,6 +3128,1430 @@ func TestRangeQuery(t *testing.T) { } } +func TestNativeHistogramRate(t *testing.T) { + // TODO(beorn7): Integrate histograms into the PromQL testing framework + // and write more tests there. + test, err := NewTest(t, "") + require.NoError(t, err) + defer test.Close() + + seriesName := "sparse_histogram_series" + lbls := labels.FromStrings("__name__", seriesName) + + app := test.Storage().Appender(context.TODO()) + for i, h := range tsdbutil.GenerateTestHistograms(100) { + _, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), h, nil) + require.NoError(t, err) + } + require.NoError(t, app.Commit()) + + require.NoError(t, test.Run()) + engine := test.QueryEngine() + + queryString := fmt.Sprintf("rate(%s[1m])", seriesName) + qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond))) + require.NoError(t, err) + res := qry.Exec(test.Context()) + require.NoError(t, res.Err) + vector, err := res.Vector() + require.NoError(t, err) + require.Len(t, vector, 1) + actualHistogram := vector[0].H + expectedHistogram := &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 1, + ZeroThreshold: 0.001, + ZeroCount: 1. / 15., + Count: 8. / 15., + Sum: 1.226666666666667, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, + PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, + NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, + NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, + } + require.Equal(t, expectedHistogram, actualHistogram) +} + +func TestNativeFloatHistogramRate(t *testing.T) { + // TODO(beorn7): Integrate histograms into the PromQL testing framework + // and write more tests there. 
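These expected results use the new value model that this patch migrates the tests to: a Series no longer carries one []Point but separate float and histogram sample slices, and a Sample holds T plus either F or H. The shapes, as implied by the fields exercised above (a sketch of the fields only, not the complete upstream definitions):

package sketch

import (
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
)

// FPoint is a float sample; Point{V, T} became FPoint{F, T}.
type FPoint struct {
	T int64   // timestamp in milliseconds
	F float64 // float value, previously Point.V
}

// HPoint is a native-histogram sample.
type HPoint struct {
	T int64
	H *histogram.FloatHistogram
}

// Series keeps the two sample kinds in separate slices.
type Series struct {
	Metric     labels.Labels
	Floats     []FPoint
	Histograms []HPoint
}

// Sample is one instant-vector element; F or H is set, never both.
type Sample struct {
	T      int64
	F      float64
	H      *histogram.FloatHistogram
	Metric labels.Labels
}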
+ test, err := NewTest(t, "") + require.NoError(t, err) + defer test.Close() + + seriesName := "sparse_histogram_series" + lbls := labels.FromStrings("__name__", seriesName) + + app := test.Storage().Appender(context.TODO()) + for i, fh := range tsdbutil.GenerateTestFloatHistograms(100) { + _, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), nil, fh) + require.NoError(t, err) + } + require.NoError(t, app.Commit()) + + require.NoError(t, test.Run()) + engine := test.QueryEngine() + + queryString := fmt.Sprintf("rate(%s[1m])", seriesName) + qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond))) + require.NoError(t, err) + res := qry.Exec(test.Context()) + require.NoError(t, res.Err) + vector, err := res.Vector() + require.NoError(t, err) + require.Len(t, vector, 1) + actualHistogram := vector[0].H + expectedHistogram := &histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 1, + ZeroThreshold: 0.001, + ZeroCount: 1. / 15., + Count: 8. / 15., + Sum: 1.226666666666667, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, + PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, + NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, + NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, + } + require.Equal(t, expectedHistogram, actualHistogram) +} + +func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { + // TODO(codesome): Integrate histograms into the PromQL testing framework + // and write more tests there. + h := &histogram.Histogram{ + Count: 24, + ZeroCount: 4, + ZeroThreshold: 0.001, + Sum: 100, + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, 1, -2, 3}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{2, 1, -2, 3}, + } + for _, floatHisto := range []bool{true, false} { + t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) { + test, err := NewTest(t, "") + require.NoError(t, err) + t.Cleanup(test.Close) + + seriesName := "sparse_histogram_series" + lbls := labels.FromStrings("__name__", seriesName) + engine := test.QueryEngine() + + ts := int64(10 * time.Minute / time.Millisecond) + app := test.Storage().Appender(context.TODO()) + if floatHisto { + _, err = app.AppendHistogram(0, lbls, ts, nil, h.ToFloat()) + } else { + _, err = app.AppendHistogram(0, lbls, ts, h, nil) + } + require.NoError(t, err) + require.NoError(t, app.Commit()) + + queryString := fmt.Sprintf("histogram_count(%s)", seriesName) + qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res := qry.Exec(test.Context()) + require.NoError(t, res.Err) + + vector, err := res.Vector() + require.NoError(t, err) + + require.Len(t, vector, 1) + require.Nil(t, vector[0].H) + if floatHisto { + require.Equal(t, h.ToFloat().Count, vector[0].F) + } else { + require.Equal(t, float64(h.Count), vector[0].F) + } + + queryString = fmt.Sprintf("histogram_sum(%s)", seriesName) + qry, err = engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res = qry.Exec(test.Context()) + require.NoError(t, res.Err) + + vector, err = res.Vector() + require.NoError(t, err) + + require.Len(t, vector, 1) + 
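Both histogram flavours are fed through the same Appender entry point in these tests: AppendHistogram takes an integer *histogram.Histogram and a *histogram.FloatHistogram, and exactly one of the two must be non-nil. A sketch of that convention against any storage.Appender (the helper name is hypothetical):

package sketch

import (
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

// appendBoth writes h once in its integer form and once, one scrape later,
// in its float form, mirroring the floatHisto toggle used by the tests.
func appendBoth(app storage.Appender, lbls labels.Labels, ts int64, h *histogram.Histogram) error {
	if _, err := app.AppendHistogram(0, lbls, ts, h, nil); err != nil {
		return err
	}
	_, err := app.AppendHistogram(0, lbls, ts+15000, nil, h.ToFloat())
	return err
}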
require.Nil(t, vector[0].H) + if floatHisto { + require.Equal(t, h.ToFloat().Sum, vector[0].F) + } else { + require.Equal(t, h.Sum, vector[0].F) + } + }) + } +} + +func TestNativeHistogram_HistogramQuantile(t *testing.T) { + // TODO(codesome): Integrate histograms into the PromQL testing framework + // and write more tests there. + type subCase struct { + quantile string + value float64 + } + + cases := []struct { + text string + // Histogram to test. + h *histogram.Histogram + // Different quantiles to test for this histogram. + subCases []subCase + }{ + { + text: "all positive buckets with zero bucket", + h: &histogram.Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, 1, -2, 3}, + }, + subCases: []subCase{ + { + quantile: "1.0001", + value: math.Inf(1), + }, + { + quantile: "1", + value: 16, + }, + { + quantile: "0.99", + value: 15.759999999999998, + }, + { + quantile: "0.9", + value: 13.600000000000001, + }, + { + quantile: "0.6", + value: 4.799999999999997, + }, + { + quantile: "0.5", + value: 1.6666666666666665, + }, + { // Zero bucket. + quantile: "0.1", + value: 0.0006000000000000001, + }, + { + quantile: "0", + value: 0, + }, + { + quantile: "-1", + value: math.Inf(-1), + }, + }, + }, + { + text: "all negative buckets with zero bucket", + h: &histogram.Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. + Schema: 0, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{2, 1, -2, 3}, + }, + subCases: []subCase{ + { + quantile: "1.0001", + value: math.Inf(1), + }, + { // Zero bucket. + quantile: "1", + value: 0, + }, + { // Zero bucket. + quantile: "0.99", + value: -6.000000000000048e-05, + }, + { // Zero bucket. + quantile: "0.9", + value: -0.0005999999999999996, + }, + { + quantile: "0.5", + value: -1.6666666666666667, + }, + { + quantile: "0.1", + value: -13.6, + }, + { + quantile: "0", + value: -16, + }, + { + quantile: "-1", + value: math.Inf(-1), + }, + }, + }, + { + text: "both positive and negative buckets with zero bucket", + h: &histogram.Histogram{ + Count: 24, + ZeroCount: 4, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, 1, -2, 3}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{2, 1, -2, 3}, + }, + subCases: []subCase{ + { + quantile: "1.0001", + value: math.Inf(1), + }, + { + quantile: "1", + value: 16, + }, + { + quantile: "0.99", + value: 15.519999999999996, + }, + { + quantile: "0.9", + value: 11.200000000000003, + }, + { + quantile: "0.7", + value: 1.2666666666666657, + }, + { // Zero bucket. + quantile: "0.55", + value: 0.0006000000000000005, + }, + { // Zero bucket. + quantile: "0.5", + value: 0, + }, + { // Zero bucket. 
+ quantile: "0.45", + value: -0.0005999999999999996, + }, + { + quantile: "0.3", + value: -1.266666666666667, + }, + { + quantile: "0.1", + value: -11.2, + }, + { + quantile: "0.01", + value: -15.52, + }, + { + quantile: "0", + value: -16, + }, + { + quantile: "-1", + value: math.Inf(-1), + }, + }, + }, + } + + test, err := NewTest(t, "") + require.NoError(t, err) + t.Cleanup(test.Close) + idx := int64(0) + for _, floatHisto := range []bool{true, false} { + for _, c := range cases { + t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) { + seriesName := "sparse_histogram_series" + lbls := labels.FromStrings("__name__", seriesName) + engine := test.QueryEngine() + ts := idx * int64(10*time.Minute/time.Millisecond) + app := test.Storage().Appender(context.TODO()) + if floatHisto { + _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat()) + } else { + _, err = app.AppendHistogram(0, lbls, ts, c.h, nil) + } + require.NoError(t, err) + require.NoError(t, app.Commit()) + + for j, sc := range c.subCases { + t.Run(fmt.Sprintf("%d %s", j, sc.quantile), func(t *testing.T) { + queryString := fmt.Sprintf("histogram_quantile(%s, %s)", sc.quantile, seriesName) + qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res := qry.Exec(test.Context()) + require.NoError(t, res.Err) + + vector, err := res.Vector() + require.NoError(t, err) + + require.Len(t, vector, 1) + require.Nil(t, vector[0].H) + require.True(t, almostEqual(sc.value, vector[0].F)) + }) + } + idx++ + }) + } + } +} + +func TestNativeHistogram_HistogramFraction(t *testing.T) { + // TODO(codesome): Integrate histograms into the PromQL testing framework + // and write more tests there. + type subCase struct { + lower, upper string + value float64 + } + + invariantCases := []subCase{ + { + lower: "42", + upper: "3.1415", + value: 0, + }, + { + lower: "0", + upper: "0", + value: 0, + }, + { + lower: "0.000001", + upper: "0.000001", + value: 0, + }, + { + lower: "42", + upper: "42", + value: 0, + }, + { + lower: "-3.1", + upper: "-3.1", + value: 0, + }, + { + lower: "3.1415", + upper: "NaN", + value: math.NaN(), + }, + { + lower: "NaN", + upper: "42", + value: math.NaN(), + }, + { + lower: "NaN", + upper: "NaN", + value: math.NaN(), + }, + { + lower: "-Inf", + upper: "+Inf", + value: 1, + }, + } + + cases := []struct { + text string + // Histogram to test. + h *histogram.Histogram + // Different ranges to test for this histogram. + subCases []subCase + }{ + { + text: "empty histogram", + h: &histogram.Histogram{}, + subCases: []subCase{ + { + lower: "3.1415", + upper: "42", + value: math.NaN(), + }, + }, + }, + { + text: "all positive buckets with zero bucket", + h: &histogram.Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, 1, -2, 3}, // Abs: 2, 3, 1, 4 + }, + subCases: append([]subCase{ + { + lower: "0", + upper: "+Inf", + value: 1, + }, + { + lower: "-Inf", + upper: "0", + value: 0, + }, + { + lower: "-0.001", + upper: "0", + value: 0, + }, + { + lower: "0", + upper: "0.001", + value: 2. / 12., + }, + { + lower: "0", + upper: "0.0005", + value: 1. / 12., + }, + { + lower: "0.001", + upper: "inf", + value: 10. / 12., + }, + { + lower: "-inf", + upper: "-0.001", + value: 0, + }, + { + lower: "1", + upper: "2", + value: 3. 
/ 12., + }, + { + lower: "1.5", + upper: "2", + value: 1.5 / 12., + }, + { + lower: "1", + upper: "8", + value: 4. / 12., + }, + { + lower: "1", + upper: "6", + value: 3.5 / 12., + }, + { + lower: "1.5", + upper: "6", + value: 2. / 12., + }, + { + lower: "-2", + upper: "-1", + value: 0, + }, + { + lower: "-2", + upper: "-1.5", + value: 0, + }, + { + lower: "-8", + upper: "-1", + value: 0, + }, + { + lower: "-6", + upper: "-1", + value: 0, + }, + { + lower: "-6", + upper: "-1.5", + value: 0, + }, + }, invariantCases...), + }, + { + text: "all negative buckets with zero bucket", + h: &histogram.Histogram{ + Count: 12, + ZeroCount: 2, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. + Schema: 0, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{2, 1, -2, 3}, + }, + subCases: append([]subCase{ + { + lower: "0", + upper: "+Inf", + value: 0, + }, + { + lower: "-Inf", + upper: "0", + value: 1, + }, + { + lower: "-0.001", + upper: "0", + value: 2. / 12., + }, + { + lower: "0", + upper: "0.001", + value: 0, + }, + { + lower: "-0.0005", + upper: "0", + value: 1. / 12., + }, + { + lower: "0.001", + upper: "inf", + value: 0, + }, + { + lower: "-inf", + upper: "-0.001", + value: 10. / 12., + }, + { + lower: "1", + upper: "2", + value: 0, + }, + { + lower: "1.5", + upper: "2", + value: 0, + }, + { + lower: "1", + upper: "8", + value: 0, + }, + { + lower: "1", + upper: "6", + value: 0, + }, + { + lower: "1.5", + upper: "6", + value: 0, + }, + { + lower: "-2", + upper: "-1", + value: 3. / 12., + }, + { + lower: "-2", + upper: "-1.5", + value: 1.5 / 12., + }, + { + lower: "-8", + upper: "-1", + value: 4. / 12., + }, + { + lower: "-6", + upper: "-1", + value: 3.5 / 12., + }, + { + lower: "-6", + upper: "-1.5", + value: 2. / 12., + }, + }, invariantCases...), + }, + { + text: "both positive and negative buckets with zero bucket", + h: &histogram.Histogram{ + Count: 24, + ZeroCount: 4, + ZeroThreshold: 0.001, + Sum: 100, // Does not matter. + Schema: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, 1, -2, 3}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{2, 1, -2, 3}, + }, + subCases: append([]subCase{ + { + lower: "0", + upper: "+Inf", + value: 0.5, + }, + { + lower: "-Inf", + upper: "0", + value: 0.5, + }, + { + lower: "-0.001", + upper: "0", + value: 2. / 24, + }, + { + lower: "0", + upper: "0.001", + value: 2. / 24., + }, + { + lower: "-0.0005", + upper: "0.0005", + value: 2. / 24., + }, + { + lower: "0.001", + upper: "inf", + value: 10. / 24., + }, + { + lower: "-inf", + upper: "-0.001", + value: 10. / 24., + }, + { + lower: "1", + upper: "2", + value: 3. / 24., + }, + { + lower: "1.5", + upper: "2", + value: 1.5 / 24., + }, + { + lower: "1", + upper: "8", + value: 4. / 24., + }, + { + lower: "1", + upper: "6", + value: 3.5 / 24., + }, + { + lower: "1.5", + upper: "6", + value: 2. / 24., + }, + { + lower: "-2", + upper: "-1", + value: 3. / 24., + }, + { + lower: "-2", + upper: "-1.5", + value: 1.5 / 24., + }, + { + lower: "-8", + upper: "-1", + value: 4. / 24., + }, + { + lower: "-6", + upper: "-1", + value: 3.5 / 24., + }, + { + lower: "-6", + upper: "-1.5", + value: 2. 
/ 24., + }, + }, invariantCases...), + }, + } + idx := int64(0) + for _, floatHisto := range []bool{true, false} { + for _, c := range cases { + t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) { + test, err := NewTest(t, "") + require.NoError(t, err) + t.Cleanup(test.Close) + + seriesName := "sparse_histogram_series" + lbls := labels.FromStrings("__name__", seriesName) + engine := test.QueryEngine() + + ts := idx * int64(10*time.Minute/time.Millisecond) + app := test.Storage().Appender(context.TODO()) + if floatHisto { + _, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat()) + } else { + _, err = app.AppendHistogram(0, lbls, ts, c.h, nil) + } + require.NoError(t, err) + require.NoError(t, app.Commit()) + + for j, sc := range c.subCases { + t.Run(fmt.Sprintf("%d %s %s", j, sc.lower, sc.upper), func(t *testing.T) { + queryString := fmt.Sprintf("histogram_fraction(%s, %s, %s)", sc.lower, sc.upper, seriesName) + qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res := qry.Exec(test.Context()) + require.NoError(t, res.Err) + + vector, err := res.Vector() + require.NoError(t, err) + + require.Len(t, vector, 1) + require.Nil(t, vector[0].H) + if math.IsNaN(sc.value) { + require.True(t, math.IsNaN(vector[0].F)) + return + } + require.Equal(t, sc.value, vector[0].F) + }) + } + idx++ + }) + } + } +} + +func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { + // TODO(codesome): Integrate histograms into the PromQL testing framework + // and write more tests there. + cases := []struct { + histograms []histogram.Histogram + expected histogram.FloatHistogram + expectedAvg histogram.FloatHistogram + }{ + { + histograms: []histogram.Histogram{ + { + CounterResetHint: histogram.GaugeType, + Schema: 0, + Count: 21, + Sum: 1234.5, + ZeroThreshold: 0.001, + ZeroCount: 4, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 1, -1, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 2, Length: 2}, + }, + NegativeBuckets: []int64{2, 2, -3, 8}, + }, + { + CounterResetHint: histogram.GaugeType, + Schema: 0, + Count: 36, + Sum: 2345.6, + ZeroThreshold: 0.001, + ZeroCount: 5, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 2, Length: 3}, + }, + NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, + }, + { + CounterResetHint: histogram.GaugeType, + Schema: 0, + Count: 36, + Sum: 1111.1, + ZeroThreshold: 0.001, + ZeroCount: 5, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 2, Length: 3}, + }, + NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, + }, + { + CounterResetHint: histogram.GaugeType, + Schema: 1, // Everything is 0 just to make the count 4 so avg has nicer numbers. 
+ }, + }, + expected: histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 0, + ZeroThreshold: 0.001, + ZeroCount: 14, + Count: 93, + Sum: 4691.2, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 7}, + }, + PositiveBuckets: []float64{3, 8, 2, 5, 3, 2, 2}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 6}, + {Offset: 3, Length: 3}, + }, + NegativeBuckets: []float64{2, 6, 8, 4, 15, 9, 10, 10, 4}, + }, + expectedAvg: histogram.FloatHistogram{ + CounterResetHint: histogram.GaugeType, + Schema: 0, + ZeroThreshold: 0.001, + ZeroCount: 3.5, + Count: 23.25, + Sum: 1172.8, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 7}, + }, + PositiveBuckets: []float64{0.75, 2, 0.5, 1.25, 0.75, 0.5, 0.5}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 6}, + {Offset: 3, Length: 3}, + }, + NegativeBuckets: []float64{0.5, 1.5, 2, 1, 3.75, 2.25, 2.5, 2.5, 1}, + }, + }, + } + + idx0 := int64(0) + for _, c := range cases { + for _, floatHisto := range []bool{true, false} { + t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { + test, err := NewTest(t, "") + require.NoError(t, err) + t.Cleanup(test.Close) + + seriesName := "sparse_histogram_series" + seriesNameOverTime := "sparse_histogram_series_over_time" + + engine := test.QueryEngine() + + ts := idx0 * int64(10*time.Minute/time.Millisecond) + app := test.Storage().Appender(context.TODO()) + for idx1, h := range c.histograms { + lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1)) + // Since we mutate h later, we need to create a copy here. + if floatHisto { + _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat()) + } else { + _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) + } + require.NoError(t, err) + + lbls = labels.FromStrings("__name__", seriesNameOverTime) + newTs := ts + int64(idx1)*int64(time.Minute/time.Millisecond) + // Since we mutate h later, we need to create a copy here. + if floatHisto { + _, err = app.AppendHistogram(0, lbls, newTs, nil, h.Copy().ToFloat()) + } else { + _, err = app.AppendHistogram(0, lbls, newTs, h.Copy(), nil) + } + require.NoError(t, err) + } + require.NoError(t, app.Commit()) + + queryAndCheck := func(queryString string, ts int64, exp Vector) { + qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res := qry.Exec(test.Context()) + require.NoError(t, res.Err) + + vector, err := res.Vector() + require.NoError(t, err) + + require.Equal(t, exp, vector) + } + + // sum(). + queryString := fmt.Sprintf("sum(%s)", seriesName) + queryAndCheck(queryString, ts, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) + + // + operator. + queryString = fmt.Sprintf(`%s{idx="0"}`, seriesName) + for idx := 1; idx < len(c.histograms); idx++ { + queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx) + } + queryAndCheck(queryString, ts, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) + + // count(). + queryString = fmt.Sprintf("count(%s)", seriesName) + queryAndCheck(queryString, ts, []Sample{{T: ts, F: 4, Metric: labels.EmptyLabels()}}) + + // avg(). + queryString = fmt.Sprintf("avg(%s)", seriesName) + queryAndCheck(queryString, ts, []Sample{{T: ts, H: &c.expectedAvg, Metric: labels.EmptyLabels()}}) + + offset := int64(len(c.histograms) - 1) + newTs := ts + offset*int64(time.Minute/time.Millisecond) + + // sum_over_time(). 
+ queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset) + queryAndCheck(queryString, newTs, []Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels()}}) + + // avg_over_time(). + queryString = fmt.Sprintf("avg_over_time(%s[%dm:1m])", seriesNameOverTime, offset) + queryAndCheck(queryString, newTs, []Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels()}}) + }) + idx0++ + } + } +} + +func TestNativeHistogram_SubOperator(t *testing.T) { + // TODO(codesome): Integrate histograms into the PromQL testing framework + // and write more tests there. + cases := []struct { + histograms []histogram.Histogram + expected histogram.FloatHistogram + }{ + { + histograms: []histogram.Histogram{ + { + Schema: 0, + Count: 36, + Sum: 2345.6, + ZeroThreshold: 0.001, + ZeroCount: 5, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 2, Length: 3}, + }, + NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, + }, + { + Schema: 0, + Count: 11, + Sum: 1234.5, + ZeroThreshold: 0.001, + ZeroCount: 3, + PositiveSpans: []histogram.Span{ + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, -1}, + NegativeSpans: []histogram.Span{ + {Offset: 2, Length: 2}, + }, + NegativeBuckets: []int64{3, -1}, + }, + }, + expected: histogram.FloatHistogram{ + Schema: 0, + Count: 25, + Sum: 1111.1, + ZeroThreshold: 0.001, + ZeroCount: 2, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 4}, + }, + PositiveBuckets: []float64{1, 1, 2, 1, 1, 1}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 2}, + {Offset: 1, Length: 1}, + {Offset: 4, Length: 3}, + }, + NegativeBuckets: []float64{1, 1, 7, 5, 5, 2}, + }, + }, + { + histograms: []histogram.Histogram{ + { + Schema: 0, + Count: 36, + Sum: 2345.6, + ZeroThreshold: 0.001, + ZeroCount: 5, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, + {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 2, Length: 3}, + }, + NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, + }, + { + Schema: 1, + Count: 11, + Sum: 1234.5, + ZeroThreshold: 0.001, + ZeroCount: 3, + PositiveSpans: []histogram.Span{ + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, -1}, + NegativeSpans: []histogram.Span{ + {Offset: 2, Length: 2}, + }, + NegativeBuckets: []int64{3, -1}, + }, + }, + expected: histogram.FloatHistogram{ + Schema: 0, + Count: 25, + Sum: 1111.1, + ZeroThreshold: 0.001, + ZeroCount: 2, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 1}, + {Offset: 1, Length: 5}, + }, + PositiveBuckets: []float64{1, 1, 2, 1, 1, 1}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 4}, + {Offset: 4, Length: 3}, + }, + NegativeBuckets: []float64{-2, 2, 2, 7, 5, 5, 2}, + }, + }, + { + histograms: []histogram.Histogram{ + { + Schema: 1, + Count: 11, + Sum: 1234.5, + ZeroThreshold: 0.001, + ZeroCount: 3, + PositiveSpans: []histogram.Span{ + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{2, -1}, + NegativeSpans: []histogram.Span{ + {Offset: 2, Length: 2}, + }, + NegativeBuckets: []int64{3, -1}, + }, + { + Schema: 0, + Count: 36, + Sum: 2345.6, + ZeroThreshold: 0.001, + ZeroCount: 5, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 4}, 
+ {Offset: 0, Length: 0}, + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 4}, + {Offset: 2, Length: 0}, + {Offset: 2, Length: 3}, + }, + NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, + }, + }, + expected: histogram.FloatHistogram{ + Schema: 0, + Count: -25, + Sum: -1111.1, + ZeroThreshold: 0.001, + ZeroCount: -2, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 1}, + {Offset: 1, Length: 5}, + }, + PositiveBuckets: []float64{-1, -1, -2, -1, -1, -1}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 4}, + {Offset: 4, Length: 3}, + }, + NegativeBuckets: []float64{2, -2, -2, -7, -5, -5, -2}, + }, + }, + } + + idx0 := int64(0) + for _, c := range cases { + for _, floatHisto := range []bool{true, false} { + t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { + test, err := NewTest(t, "") + require.NoError(t, err) + t.Cleanup(test.Close) + + seriesName := "sparse_histogram_series" + + engine := test.QueryEngine() + + ts := idx0 * int64(10*time.Minute/time.Millisecond) + app := test.Storage().Appender(context.TODO()) + for idx1, h := range c.histograms { + lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1)) + // Since we mutate h later, we need to create a copy here. + if floatHisto { + _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat()) + } else { + _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) + } + require.NoError(t, err) + } + require.NoError(t, app.Commit()) + + queryAndCheck := func(queryString string, exp Vector) { + qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res := qry.Exec(test.Context()) + require.NoError(t, res.Err) + + vector, err := res.Vector() + require.NoError(t, err) + + require.Equal(t, exp, vector) + } + + // - operator. + queryString := fmt.Sprintf(`%s{idx="0"}`, seriesName) + for idx := 1; idx < len(c.histograms); idx++ { + queryString += fmt.Sprintf(` - ignoring(idx) %s{idx="%d"}`, seriesName, idx) + } + queryAndCheck(queryString, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) + }) + idx0++ + } + } +} + +func TestNativeHistogram_MulDivOperator(t *testing.T) { + // TODO(codesome): Integrate histograms into the PromQL testing framework + // and write more tests there. 
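The cases below expect multiplication and division by a number to scale every population field of the histogram uniformly: Count, ZeroCount, Sum, and all bucket counts (so dividing by zero yields +Inf everywhere). A scalar-side sketch; the chained calls in the functions.go hunks later in this patch suggest Mul and Div mutate the receiver and return it:

package sketch

import "github.com/prometheus/prometheus/model/histogram"

// scale mirrors the expectedMul/expectedDiv fixtures below: the float form
// of the histogram is scaled field by field.
func scale(h *histogram.Histogram, factor float64) (mul, div *histogram.FloatHistogram) {
	mul = h.ToFloat().Mul(factor)
	div = h.ToFloat().Div(factor)
	return mul, div
}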
+ originalHistogram := histogram.Histogram{ + Schema: 0, + Count: 21, + Sum: 33, + ZeroThreshold: 0.001, + ZeroCount: 3, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []int64{3, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + NegativeBuckets: []int64{3, 0, 0}, + } + + cases := []struct { + scalar float64 + histogram histogram.Histogram + expectedMul histogram.FloatHistogram + expectedDiv histogram.FloatHistogram + }{ + { + scalar: 3, + histogram: originalHistogram, + expectedMul: histogram.FloatHistogram{ + Schema: 0, + Count: 63, + Sum: 99, + ZeroThreshold: 0.001, + ZeroCount: 9, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []float64{9, 9, 9}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + NegativeBuckets: []float64{9, 9, 9}, + }, + expectedDiv: histogram.FloatHistogram{ + Schema: 0, + Count: 7, + Sum: 11, + ZeroThreshold: 0.001, + ZeroCount: 1, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []float64{1, 1, 1}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + NegativeBuckets: []float64{1, 1, 1}, + }, + }, + { + scalar: 0, + histogram: originalHistogram, + expectedMul: histogram.FloatHistogram{ + Schema: 0, + Count: 0, + Sum: 0, + ZeroThreshold: 0.001, + ZeroCount: 0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []float64{0, 0, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + NegativeBuckets: []float64{0, 0, 0}, + }, + expectedDiv: histogram.FloatHistogram{ + Schema: 0, + Count: math.Inf(1), + Sum: math.Inf(1), + ZeroThreshold: 0.001, + ZeroCount: math.Inf(1), + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + PositiveBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1)}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 3}, + }, + NegativeBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1)}, + }, + }, + } + + idx0 := int64(0) + for _, c := range cases { + for _, floatHisto := range []bool{true, false} { + t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { + test, err := NewTest(t, "") + require.NoError(t, err) + t.Cleanup(test.Close) + + seriesName := "sparse_histogram_series" + floatSeriesName := "float_series" + + engine := test.QueryEngine() + + ts := idx0 * int64(10*time.Minute/time.Millisecond) + app := test.Storage().Appender(context.TODO()) + h := c.histogram + lbls := labels.FromStrings("__name__", seriesName) + // Since we mutate h later, we need to create a copy here. + if floatHisto { + _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat()) + } else { + _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) + } + require.NoError(t, err) + _, err = app.Append(0, labels.FromStrings("__name__", floatSeriesName), ts, c.scalar) + require.NoError(t, err) + require.NoError(t, app.Commit()) + + queryAndCheck := func(queryString string, exp Vector) { + qry, err := engine.NewInstantQuery(test.context, test.Queryable(), nil, queryString, timestamp.Time(ts)) + require.NoError(t, err) + + res := qry.Exec(test.Context()) + require.NoError(t, res.Err) + + vector, err := res.Vector() + require.NoError(t, err) + + require.Equal(t, exp, vector) + } + + // histogram * scalar. + queryString := fmt.Sprintf(`%s * %f`, seriesName, c.scalar) + queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) + + // scalar * histogram. 
+ queryString = fmt.Sprintf(`%f * %s`, c.scalar, seriesName) + queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) + + // histogram * float. + queryString = fmt.Sprintf(`%s * %s`, seriesName, floatSeriesName) + queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) + + // float * histogram. + queryString = fmt.Sprintf(`%s * %s`, floatSeriesName, seriesName) + queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}}) + + // histogram / scalar. + queryString = fmt.Sprintf(`%s / %f`, seriesName, c.scalar) + queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}}) + + // histogram / float. + queryString = fmt.Sprintf(`%s / %s`, seriesName, floatSeriesName) + queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}}) + }) + idx0++ + } + } +} + func TestQueryLookbackDelta(t *testing.T) { var ( load = `load 5m @@ -3195,10 +4626,8 @@ metric 0 1 2 if c.engineLookback != 0 { eng.lookbackDelta = c.engineLookback } - opts := &QueryOpts{ - LookbackDelta: c.queryLookback, - } - qry, err := eng.NewInstantQuery(test.Queryable(), opts, query, c.ts) + opts := NewPrometheusQueryOpts(false, c.queryLookback) + qry, err := eng.NewInstantQuery(test.context, test.Queryable(), opts, query, c.ts) require.NoError(t, err) res := qry.Exec(test.Context()) diff --git a/promql/functions.go b/promql/functions.go index e25f3f119e..96bffab96d 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// nolint:revive // Many unused function arguments in this file by design. package promql import ( @@ -24,6 +25,7 @@ import ( "github.com/grafana/regexp" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" ) @@ -53,9 +55,9 @@ type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNo // === time() float64 === func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return Vector{Sample{Point: Point{ - V: float64(enh.Ts) / 1000, - }}} + return Vector{Sample{ + F: float64(enh.Ts) / 1000, + }} } // extrapolatedRate is a utility function for rate/increase/delta. @@ -66,44 +68,71 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod ms := args[0].(*parser.MatrixSelector) vs := ms.VectorSelector.(*parser.VectorSelector) var ( - samples = vals[0].(Matrix)[0] - rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset) - rangeEnd = enh.Ts - durationMilliseconds(vs.Offset) + samples = vals[0].(Matrix)[0] + rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset) + rangeEnd = enh.Ts - durationMilliseconds(vs.Offset) + resultFloat float64 + resultHistogram *histogram.FloatHistogram + firstT, lastT int64 + numSamplesMinusOne int ) - // No sense in trying to compute a rate without at least two points. Drop - // this Vector element. - if len(samples.Points) < 2 { + // We need either at least two Histograms and no Floats, or at least two + // Floats and no Histograms to calculate a rate. Otherwise, drop this + // Vector element. + if len(samples.Histograms) > 0 && len(samples.Floats) > 0 { + // Mix of histograms and floats. TODO(beorn7): Communicate this failure reason.
return enh.Out } - resultValue := samples.Points[len(samples.Points)-1].V - samples.Points[0].V - if isCounter { - var lastValue float64 - for _, sample := range samples.Points { - if sample.V < lastValue { - resultValue += lastValue - } - lastValue = sample.V + switch { + case len(samples.Histograms) > 1: + numSamplesMinusOne = len(samples.Histograms) - 1 + firstT = samples.Histograms[0].T + lastT = samples.Histograms[numSamplesMinusOne].T + resultHistogram = histogramRate(samples.Histograms, isCounter) + if resultHistogram == nil { + // The histograms are not compatible with each other. + // TODO(beorn7): Communicate this failure reason. + return enh.Out } + case len(samples.Floats) > 1: + numSamplesMinusOne = len(samples.Floats) - 1 + firstT = samples.Floats[0].T + lastT = samples.Floats[numSamplesMinusOne].T + resultFloat = samples.Floats[numSamplesMinusOne].F - samples.Floats[0].F + if !isCounter { + break + } + // Handle counter resets: + prevValue := samples.Floats[0].F + for _, currPoint := range samples.Floats[1:] { + if currPoint.F < prevValue { + resultFloat += prevValue + } + prevValue = currPoint.F + } + default: + // Not enough samples. TODO(beorn7): Communicate this failure reason. + return enh.Out } // Duration between first/last samples and boundary of range. - durationToStart := float64(samples.Points[0].T-rangeStart) / 1000 - durationToEnd := float64(rangeEnd-samples.Points[len(samples.Points)-1].T) / 1000 + durationToStart := float64(firstT-rangeStart) / 1000 + durationToEnd := float64(rangeEnd-lastT) / 1000 - sampledInterval := float64(samples.Points[len(samples.Points)-1].T-samples.Points[0].T) / 1000 - averageDurationBetweenSamples := sampledInterval / float64(len(samples.Points)-1) + sampledInterval := float64(lastT-firstT) / 1000 + averageDurationBetweenSamples := sampledInterval / float64(numSamplesMinusOne) - if isCounter && resultValue > 0 && samples.Points[0].V >= 0 { - // Counters cannot be negative. If we have any slope at - // all (i.e. resultValue went up), we can extrapolate - // the zero point of the counter. If the duration to the - // zero point is shorter than the durationToStart, we - // take the zero point as the start of the series, - // thereby avoiding extrapolation to negative counter - // values. - durationToZero := sampledInterval * (samples.Points[0].V / resultValue) + // TODO(beorn7): Do this for histograms, too. + if isCounter && resultFloat > 0 && len(samples.Floats) > 0 && samples.Floats[0].F >= 0 { + // Counters cannot be negative. If we have any slope at all + // (i.e. resultFloat went up), we can extrapolate the zero point + // of the counter. If the duration to the zero point is shorter + // than the durationToStart, we take the zero point as the start + // of the series, thereby avoiding extrapolation to negative + // counter values. 
+ durationToZero := sampledInterval * (samples.Floats[0].F / resultFloat) if durationToZero < durationToStart { durationToStart = durationToZero } @@ -126,14 +155,68 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod } else { extrapolateToInterval += averageDurationBetweenSamples / 2 } - resultValue = resultValue * (extrapolateToInterval / sampledInterval) + factor := extrapolateToInterval / sampledInterval if isRate { - resultValue = resultValue / ms.Range.Seconds() + factor /= ms.Range.Seconds() + } + if resultHistogram == nil { + resultFloat *= factor + } else { + resultHistogram.Mul(factor) } - return append(enh.Out, Sample{ - Point: Point{V: resultValue}, - }) + return append(enh.Out, Sample{F: resultFloat, H: resultHistogram}) +} + +// histogramRate is a helper function for extrapolatedRate. It requires +// points[0] to be a histogram. It returns nil if any other Point in points is +// not a histogram. +func histogramRate(points []HPoint, isCounter bool) *histogram.FloatHistogram { + prev := points[0].H + last := points[len(points)-1].H + if last == nil { + return nil // Range contains a mix of histograms and floats. + } + minSchema := prev.Schema + if last.Schema < minSchema { + minSchema = last.Schema + } + + // First iteration to find out two things: + // - What's the smallest relevant schema? + // - Are all data points histograms? + // TODO(beorn7): Find a way to check that earlier, e.g. by handing in a + // []FloatPoint and a []HistogramPoint separately. + for _, currPoint := range points[1 : len(points)-1] { + curr := currPoint.H + if curr == nil { + return nil // Range contains a mix of histograms and floats. + } + // TODO(trevorwhitney): Check if isCounter is consistent with curr.CounterResetHint. + if !isCounter { + continue + } + if curr.Schema < minSchema { + minSchema = curr.Schema + } + } + + h := last.CopyToSchema(minSchema) + h.Sub(prev) + + if isCounter { + // Second iteration to deal with counter resets. + for _, currPoint := range points[1:] { + curr := currPoint.H + if curr.DetectReset(prev) { + h.Add(prev) + } + prev = curr + } + } + + h.CounterResetHint = histogram.GaugeType + return h.Compact(0) } // === delta(Matrix parser.ValueTypeMatrix) Vector === @@ -165,19 +248,19 @@ func instantValue(vals []parser.Value, out Vector, isRate bool) Vector { samples := vals[0].(Matrix)[0] // No sense in trying to compute a rate without at least two points. Drop // this Vector element. - if len(samples.Points) < 2 { + if len(samples.Floats) < 2 { return out } - lastSample := samples.Points[len(samples.Points)-1] - previousSample := samples.Points[len(samples.Points)-2] + lastSample := samples.Floats[len(samples.Floats)-1] + previousSample := samples.Floats[len(samples.Floats)-2] var resultValue float64 - if isRate && lastSample.V < previousSample.V { + if isRate && lastSample.F < previousSample.F { // Counter reset. - resultValue = lastSample.V + resultValue = lastSample.F } else { - resultValue = lastSample.V - previousSample.V + resultValue = lastSample.F - previousSample.F } sampledInterval := lastSample.T - previousSample.T @@ -191,9 +274,7 @@ func instantValue(vals []parser.Value, out Vector, isRate bool) Vector { resultValue /= float64(sampledInterval) / 1000 } - return append(out, Sample{ - Point: Point{V: resultValue}, - }) + return append(out, Sample{F: resultValue}) } // Calculate the trend value at the given index i in raw data d. 
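The counter-reset loop above is easiest to see on a toy series: the raw delta (last - first) is repaired by adding back the value observed just before each drop, since a counter can only fall by being reset. The same logic on plain floats, self-contained and with invented numbers:

package main

import "fmt"

// increaseWithResets mirrors the reset handling in extrapolatedRate before
// any range extrapolation is applied.
func increaseWithResets(vs []float64) float64 {
	result := vs[len(vs)-1] - vs[0]
	prev := vs[0]
	for _, cur := range vs[1:] {
		if cur < prev { // counter reset: re-add what was lost
			result += prev
		}
		prev = cur
	}
	return result
}

func main() {
	// 10 -> 20 -> reset -> 5: raw delta is -5, corrected increase is 15.
	fmt.Println(increaseWithResets([]float64{10, 20, 0, 5}))
}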
@@ -222,12 +303,12 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode samples := vals[0].(Matrix)[0] // The smoothing factor argument. - sf := vals[1].(Vector)[0].V + sf := vals[1].(Vector)[0].F // The trend factor argument. - tf := vals[2].(Vector)[0].V + tf := vals[2].(Vector)[0].F - // Sanity check the input. + // Check that the input parameters are valid. if sf <= 0 || sf >= 1 { panic(fmt.Errorf("invalid smoothing factor. Expected: 0 < sf < 1, got: %f", sf)) } @@ -235,7 +316,7 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode panic(fmt.Errorf("invalid trend factor. Expected: 0 < tf < 1, got: %f", tf)) } - l := len(samples.Points) + l := len(samples.Floats) // Can't do the smoothing operation with less than two points. if l < 2 { @@ -244,15 +325,15 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode var s0, s1, b float64 // Set initial values. - s1 = samples.Points[0].V - b = samples.Points[1].V - samples.Points[0].V + s1 = samples.Floats[0].F + b = samples.Floats[1].F - samples.Floats[0].F // Run the smoothing operation. var x, y float64 for i := 1; i < l; i++ { // Scale the raw value against the smoothing factor. - x = sf * samples.Points[i].V + x = sf * samples.Floats[i].F // Scale the last smoothed value with the trend at this point. b = calcTrendValue(i-1, tf, s0, s1, b) @@ -261,9 +342,7 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode s0, s1 = s1, x+y } - return append(enh.Out, Sample{ - Point: Point{V: s1}, - }) + return append(enh.Out, Sample{F: s1}) } // === sort(node parser.ValueTypeVector) Vector === @@ -287,15 +366,15 @@ func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel // === clamp(Vector parser.ValueTypeVector, min, max Scalar) Vector === func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { vec := vals[0].(Vector) - min := vals[1].(Vector)[0].Point.V - max := vals[2].(Vector)[0].Point.V + min := vals[1].(Vector)[0].F + max := vals[2].(Vector)[0].F if max < min { return enh.Out } for _, el := range vec { enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: Point{V: math.Max(min, math.Min(max, el.V))}, + F: math.Max(min, math.Min(max, el.F)), }) } return enh.Out @@ -304,11 +383,11 @@ func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper // === clamp_max(Vector parser.ValueTypeVector, max Scalar) Vector === func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { vec := vals[0].(Vector) - max := vals[1].(Vector)[0].Point.V + max := vals[1].(Vector)[0].F for _, el := range vec { enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: Point{V: math.Min(max, el.V)}, + F: math.Min(max, el.F), }) } return enh.Out @@ -317,11 +396,11 @@ func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel // === clamp_min(Vector parser.ValueTypeVector, min Scalar) Vector === func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { vec := vals[0].(Vector) - min := vals[1].(Vector)[0].Point.V + min := vals[1].(Vector)[0].F for _, el := range vec { enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: Point{V: math.Max(min, el.V)}, + F: math.Max(min, el.F), }) } return enh.Out @@ -334,16 +413,16 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper // Ties are 
solved by rounding up. toNearest := float64(1) if len(args) >= 2 { - toNearest = vals[1].(Vector)[0].Point.V + toNearest = vals[1].(Vector)[0].F } // Invert as it seems to cause fewer floating point accuracy issues. toNearestInverse := 1.0 / toNearest for _, el := range vec { - v := math.Floor(el.V*toNearestInverse+0.5) / toNearestInverse + f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: Point{V: v}, + F: f, }) } return enh.Out @@ -353,37 +432,63 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper func funcScalar(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { v := vals[0].(Vector) if len(v) != 1 { - return append(enh.Out, Sample{ - Point: Point{V: math.NaN()}, - }) + return append(enh.Out, Sample{F: math.NaN()}) } - return append(enh.Out, Sample{ - Point: Point{V: v[0].V}, - }) + return append(enh.Out, Sample{F: v[0].F}) } -func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func([]Point) float64) Vector { +func aggrOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) float64) Vector { el := vals[0].(Matrix)[0] - return append(enh.Out, Sample{ - Point: Point{V: aggrFn(el.Points)}, - }) + return append(enh.Out, Sample{F: aggrFn(el)}) +} + +func aggrHistOverTime(vals []parser.Value, enh *EvalNodeHelper, aggrFn func(Series) *histogram.FloatHistogram) Vector { + el := vals[0].(Matrix)[0] + + return append(enh.Out, Sample{H: aggrFn(el)}) } // === avg_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { + if len(vals[0].(Matrix)[0].Floats) > 0 && len(vals[0].(Matrix)[0].Histograms) > 0 { + // TODO(zenador): Add warning for mixed floats and histograms. + return enh.Out + } + if len(vals[0].(Matrix)[0].Floats) == 0 { + // The passed values only contain histograms. + return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram { + count := 1 + mean := s.Histograms[0].H.Copy() + for _, h := range s.Histograms[1:] { + count++ + left := h.H.Copy().Div(float64(count)) + right := mean.Copy().Div(float64(count)) + // The histogram being added/subtracted must have + // an equal or larger schema. + if h.H.Schema >= mean.Schema { + toAdd := right.Mul(-1).Add(left) + mean.Add(toAdd) + } else { + toAdd := left.Sub(right) + mean = toAdd.Add(mean) + } + } + return mean + }) + } + return aggrOverTime(vals, enh, func(s Series) float64 { var mean, count, c float64 - for _, v := range values { + for _, f := range s.Floats { count++ if math.IsInf(mean, 0) { - if math.IsInf(v.V, 0) && (mean > 0) == (v.V > 0) { - // The `mean` and `v.V` values are `Inf` of the same sign. They + if math.IsInf(f.F, 0) && (mean > 0) == (f.F > 0) { + // The `mean` and `f.F` values are `Inf` of the same sign. They // can't be subtracted, but the value of `mean` is correct // already. continue } - if !math.IsInf(v.V, 0) && !math.IsNaN(v.V) { + if !math.IsInf(f.F, 0) && !math.IsNaN(f.F) { // At this stage, the mean is an infinite. If the added // value is neither an Inf or a Nan, we can keep that mean // value. 
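The histogram branch of funcAvgOverTime above never builds a plain sum; it folds each histogram into a running mean via mean += (x - mean)/count, splitting that delta into x/count and mean/count so that the operand with the equal-or-larger schema is always the one being added, per the schema rule noted in the code. The float analogue of the same incremental update, as a sketch:

package main

import "fmt"

// runningMean applies mean_n = mean_{n-1} + (x_n - mean_{n-1}) / n,
// the update the histogram code above performs with Div/Mul/Add/Sub.
func runningMean(xs []float64) float64 {
	mean := xs[0]
	for i, x := range xs[1:] {
		mean += (x - mean) / float64(i+2)
	}
	return mean
}

func main() {
	fmt.Println(runningMean([]float64{2, 4, 6})) // 4
}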
@@ -393,7 +498,7 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode continue } } - mean, c = kahanSumInc(v.V/count-mean/count, mean, c) + mean, c = kahanSumInc(f.F/count-mean/count, mean, c) } if math.IsInf(mean, 0) { @@ -405,8 +510,8 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode // === count_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { - return float64(len(values)) + return aggrOverTime(vals, enh, func(s Series) float64 { + return float64(len(s.Floats) + len(s.Histograms)) }) } @@ -414,19 +519,42 @@ func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNo func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { el := vals[0].(Matrix)[0] + var f FPoint + if len(el.Floats) > 0 { + f = el.Floats[len(el.Floats)-1] + } + + var h HPoint + if len(el.Histograms) > 0 { + h = el.Histograms[len(el.Histograms)-1] + } + + if h.H == nil || h.T < f.T { + return append(enh.Out, Sample{ + Metric: el.Metric, + F: f.F, + }) + } return append(enh.Out, Sample{ Metric: el.Metric, - Point: Point{V: el.Points[len(el.Points)-1].V}, + H: h.H, }) } // === max_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { - max := values[0].V - for _, v := range values { - if v.V > max || math.IsNaN(max) { - max = v.V + if len(vals[0].(Matrix)[0].Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. max_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. + return enh.Out + } + return aggrOverTime(vals, enh, func(s Series) float64 { + max := s.Floats[0].F + for _, f := range s.Floats { + if f.F > max || math.IsNaN(max) { + max = f.F } } return max @@ -435,11 +563,18 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode // === min_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { - min := values[0].V - for _, v := range values { - if v.V < min || math.IsNaN(min) { - min = v.V + if len(vals[0].(Matrix)[0].Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. min_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. + return enh.Out + } + return aggrOverTime(vals, enh, func(s Series) float64 { + min := s.Floats[0].F + for _, f := range s.Floats { + if f.F < min || math.IsNaN(min) { + min = f.F } } return min @@ -448,10 +583,30 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode // === sum_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { + if len(vals[0].(Matrix)[0].Floats) > 0 && len(vals[0].(Matrix)[0].Histograms) > 0 { + // TODO(zenador): Add warning for mixed floats and histograms. 
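// [Editorial sketch, not part of this patch.] kahanSumInc performs one step
// of Kahan compensated summation; the whole technique in isolation looks
// like this:
func kahanSum(xs []float64) float64 {
	var sum, c float64 // c carries the low-order bits lost by sum += x
	for _, x := range xs {
		y := x - c        // re-inject the bits lost on the previous step
		t := sum + y      // big + small: the low-order bits of y are lost here
		c = (t - sum) - y // (t - sum) is the part of y that survived; minus y leaves the lost part, negated
		sum = t
	}
	return sum + c
}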
+ return enh.Out + } + if len(vals[0].(Matrix)[0].Floats) == 0 { + // The passed values only contain histograms. + return aggrHistOverTime(vals, enh, func(s Series) *histogram.FloatHistogram { + sum := s.Histograms[0].H.Copy() + for _, h := range s.Histograms[1:] { + // The histogram being added must have + // an equal or larger schema. + if h.H.Schema >= sum.Schema { + sum.Add(h.H) + } else { + sum = h.H.Copy().Add(sum) + } + } + return sum + }) + } + return aggrOverTime(vals, enh, func(s Series) float64 { var sum, c float64 - for _, v := range values { - sum, c = kahanSumInc(v.V, sum, c) + for _, f := range s.Floats { + sum, c = kahanSumInc(f.F, sum, c) } if math.IsInf(sum, 0) { return sum @@ -462,29 +617,41 @@ func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode // === quantile_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - q := vals[0].(Vector)[0].V + q := vals[0].(Vector)[0].F el := vals[1].(Matrix)[0] - - values := make(vectorByValueHeap, 0, len(el.Points)) - for _, v := range el.Points { - values = append(values, Sample{Point: Point{V: v.V}}) + if len(el.Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. quantile_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. + return enh.Out } - return append(enh.Out, Sample{ - Point: Point{V: quantile(q, values)}, - }) + + values := make(vectorByValueHeap, 0, len(el.Floats)) + for _, f := range el.Floats { + values = append(values, Sample{F: f.F}) + } + return append(enh.Out, Sample{F: quantile(q, values)}) } // === stddev_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { + if len(vals[0].(Matrix)[0].Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. stddev_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. + return enh.Out + } + return aggrOverTime(vals, enh, func(s Series) float64 { var count float64 var mean, cMean float64 var aux, cAux float64 - for _, v := range values { + for _, f := range s.Floats { count++ - delta := v.V - (mean + cMean) + delta := f.F - (mean + cMean) mean, cMean = kahanSumInc(delta/count, mean, cMean) - aux, cAux = kahanSumInc(delta*(v.V-(mean+cMean)), aux, cAux) + aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux) } return math.Sqrt((aux + cAux) / count) }) @@ -492,15 +659,22 @@ func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN // === stdvar_over_time(Matrix parser.ValueTypeMatrix) Vector === func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { + if len(vals[0].(Matrix)[0].Floats) == 0 { + // TODO(beorn7): The passed values only contain + // histograms. stdvar_over_time ignores histograms for now. If + // there are only histograms, we have to return without adding + // anything to enh.Out. 
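// [Editorial sketch, not part of this patch.] The stddev_over_time and
// stdvar_over_time loops above and below are Welford's single-pass variance
// recurrence with Kahan compensation bolted on; without the compensation
// terms it reduces to:
func populationVariance(xs []float64) float64 {
	var count, mean, m2 float64
	for _, x := range xs {
		count++
		delta := x - mean
		mean += delta / count
		m2 += delta * (x - mean) // deliberately uses the already-updated mean
	}
	return m2 / count // population variance; stddev_over_time takes the square root
}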
+ return enh.Out + } + return aggrOverTime(vals, enh, func(s Series) float64 { var count float64 var mean, cMean float64 var aux, cAux float64 - for _, v := range values { + for _, f := range s.Floats { count++ - delta := v.V - (mean + cMean) + delta := f.F - (mean + cMean) mean, cMean = kahanSumInc(delta/count, mean, cMean) - aux, cAux = kahanSumInc(delta*(v.V-(mean+cMean)), aux, cAux) + aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux) } return (aux + cAux) / count }) @@ -514,7 +688,7 @@ func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe return append(enh.Out, Sample{ Metric: createLabelsForAbsentFunction(args[0]), - Point: Point{V: 1}, + F: 1, }) } @@ -524,25 +698,24 @@ func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe // Due to engine optimization, this function is only called when this condition is true. // Then, the engine post-processes the results to get the expected output. func funcAbsentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return append(enh.Out, - Sample{ - Point: Point{V: 1}, - }) + return append(enh.Out, Sample{F: 1}) } // === present_over_time(Vector parser.ValueTypeMatrix) Vector === func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return aggrOverTime(vals, enh, func(values []Point) float64 { + return aggrOverTime(vals, enh, func(s Series) float64 { return 1 }) } func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector { for _, el := range vals[0].(Vector) { - enh.Out = append(enh.Out, Sample{ - Metric: enh.DropMetricName(el.Metric), - Point: Point{V: f(el.V)}, - }) + if el.H == nil { // Process only float samples. + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(el.Metric), + F: f(el.F), + }) + } } return enh.Out } @@ -663,20 +836,20 @@ func funcDeg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) // === pi() Scalar === func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - return Vector{Sample{Point: Point{ - V: math.Pi, - }}} + return Vector{Sample{F: math.Pi}} } // === sgn(Vector parser.ValueTypeVector) Vector === func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { return simpleFunc(vals, enh, func(v float64) float64 { - if v < 0 { + switch { + case v < 0: return -1 - } else if v > 0 { + case v > 0: return 1 + default: + return v } - return v }) } @@ -686,7 +859,7 @@ func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe for _, el := range vec { enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: Point{V: float64(el.T) / 1000}, + F: float64(el.T) / 1000, }) } return enh.Out @@ -715,7 +888,7 @@ func kahanSumInc(inc, sum, c float64) (newSum, newC float64) { // linearRegression performs a least-square linear regression analysis on the // provided SamplePairs. It returns the slope, and the intercept value at the // provided time. 
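// [Editorial sketch, not part of this patch.] linearRegression below is the
// standard closed-form least-squares fit; with the Kahan terms and the
// constant-series fast path removed, the computation reduces to:
func leastSquares(xs, ys []float64) (slope, intercept float64) {
	var n, sumX, sumY, sumXY, sumX2 float64
	for i := range xs {
		n++
		sumX += xs[i]
		sumY += ys[i]
		sumXY += xs[i] * ys[i]
		sumX2 += xs[i] * xs[i]
	}
	covXY := sumXY - sumX*sumY/n // n times Cov(x, y)
	varX := sumX2 - sumX*sumX/n  // n times Var(x)
	slope = covXY / varX
	intercept = sumY/n - slope*sumX/n
	return slope, intercept
}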
-func linearRegression(samples []Point, interceptTime int64) (slope, intercept float64) { +func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept float64) { var ( n float64 sumX, cX float64 @@ -725,18 +898,18 @@ func linearRegression(samples []Point, interceptTime int64) (slope, intercept fl initY float64 constY bool ) - initY = samples[0].V + initY = samples[0].F constY = true for i, sample := range samples { // Set constY to false if any new y values are encountered. - if constY && i > 0 && sample.V != initY { + if constY && i > 0 && sample.F != initY { constY = false } n += 1.0 x := float64(sample.T-interceptTime) / 1e3 sumX, cX = kahanSumInc(x, sumX, cX) - sumY, cY = kahanSumInc(sample.V, sumY, cY) - sumXY, cXY = kahanSumInc(x*sample.V, sumXY, cXY) + sumY, cY = kahanSumInc(sample.F, sumY, cY) + sumXY, cXY = kahanSumInc(x*sample.F, sumXY, cXY) sumX2, cX2 = kahanSumInc(x*x, sumX2, cX2) } if constY { @@ -745,10 +918,10 @@ func linearRegression(samples []Point, interceptTime int64) (slope, intercept fl } return 0, initY } - sumX = sumX + cX - sumY = sumY + cY - sumXY = sumXY + cXY - sumX2 = sumX2 + cX2 + sumX += cX + sumY += cY + sumXY += cXY + sumX2 += cX2 covXY := sumXY - sumX*sumY/n varX := sumX2 - sumX*sumX/n @@ -764,38 +937,87 @@ func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper // No sense in trying to compute a derivative without at least two points. // Drop this Vector element. - if len(samples.Points) < 2 { + if len(samples.Floats) < 2 { return enh.Out } // We pass in an arbitrary timestamp that is near the values in use // to avoid floating point accuracy issues, see // https://github.com/prometheus/prometheus/issues/2674 - slope, _ := linearRegression(samples.Points, samples.Points[0].T) - return append(enh.Out, Sample{ - Point: Point{V: slope}, - }) + slope, _ := linearRegression(samples.Floats, samples.Floats[0].T) + return append(enh.Out, Sample{F: slope}) } // === predict_linear(node parser.ValueTypeMatrix, k parser.ValueTypeScalar) Vector === func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { samples := vals[0].(Matrix)[0] - duration := vals[1].(Vector)[0].V + duration := vals[1].(Vector)[0].F // No sense in trying to predict anything without at least two points. // Drop this Vector element. - if len(samples.Points) < 2 { + if len(samples.Floats) < 2 { return enh.Out } - slope, intercept := linearRegression(samples.Points, enh.Ts) + slope, intercept := linearRegression(samples.Floats, enh.Ts) - return append(enh.Out, Sample{ - Point: Point{V: slope*duration + intercept}, - }) + return append(enh.Out, Sample{F: slope*duration + intercept}) +} + +// === histogram_count(Vector parser.ValueTypeVector) Vector === +func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + inVec := vals[0].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. + if sample.H == nil { + continue + } + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + F: sample.H.Count, + }) + } + return enh.Out +} + +// === histogram_sum(Vector parser.ValueTypeVector) Vector === +func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + inVec := vals[0].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. 
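// [Editorial sketch with hypothetical sample data, not part of this patch.]
// funcDeriv above returns just the slope of the fit, while funcPredictLinear
// evaluates the fit duration seconds past the evaluation timestamp enh.Ts
// (the intercept time), i.e. slope*duration + intercept. Reusing the
// leastSquares sketch above:
func predictLinearSketch() float64 {
	xs := []float64{0, 60, 120} // seconds relative to the intercept time
	ys := []float64{10, 12, 14} // sampled values: +2 per minute
	slope, intercept := leastSquares(xs, ys)
	return slope*3600 + intercept // value projected one hour past the intercept time
}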
+ if sample.H == nil { + continue + } + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + F: sample.H.Sum, + }) + } + return enh.Out +} + +// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === +func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + lower := vals[0].(Vector)[0].F + upper := vals[1].(Vector)[0].F + inVec := vals[2].(Vector) + + for _, sample := range inVec { + // Skip non-histogram samples. + if sample.H == nil { + continue + } + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + F: histogramFraction(lower, upper, sample.H), + }) + } + return enh.Out } // === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector === func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - q := vals[0].(Vector)[0].V + q := vals[0].(Vector)[0].F inVec := vals[1].(Vector) if enh.signatureToMetricWithBuckets == nil { @@ -805,33 +1027,64 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev v.buckets = v.buckets[:0] } } - for _, el := range inVec { + + var histogramSamples []Sample + + for _, sample := range inVec { + // We are only looking for conventional buckets here. Remember + // the histograms for later treatment. + if sample.H != nil { + histogramSamples = append(histogramSamples, sample) + continue + } + upperBound, err := strconv.ParseFloat( - el.Metric.Get(model.BucketLabel), 64, + sample.Metric.Get(model.BucketLabel), 64, ) if err != nil { // Oops, no bucket label or malformed label value. Skip. // TODO(beorn7): Issue a warning somehow. continue } - enh.lblBuf = el.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel) + enh.lblBuf = sample.Metric.BytesWithoutLabels(enh.lblBuf, labels.BucketLabel) mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)] if !ok { - el.Metric = labels.NewBuilder(el.Metric). + sample.Metric = labels.NewBuilder(sample.Metric). Del(excludedLabels...). - Labels(nil) + Labels() - mb = &metricWithBuckets{el.Metric, nil} + mb = &metricWithBuckets{sample.Metric, nil} enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb } - mb.buckets = append(mb.buckets, bucket{upperBound, el.V}) + mb.buckets = append(mb.buckets, bucket{upperBound, sample.F}) + + } + + // Now deal with the histograms. + for _, sample := range histogramSamples { + // We have to reconstruct the exact same signature as above for + // a conventional histogram, just ignoring any le label. + enh.lblBuf = sample.Metric.Bytes(enh.lblBuf) + if mb, ok := enh.signatureToMetricWithBuckets[string(enh.lblBuf)]; ok && len(mb.buckets) > 0 { + // At this data point, we have conventional histogram + // buckets and a native histogram with the same name and + // labels. Do not evaluate anything. + // TODO(beorn7): Issue a warning somehow. 
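// [Editorial sketch, not part of this patch.] For the conventional buckets
// collected above, the bucketQuantile call below interpolates linearly
// within the first bucket whose cumulative count reaches rank q*total. A
// minimal version, assuming cumulative buckets sorted by upper bound and
// ignoring the edge cases (q outside [0, 1], the +Inf bucket, empty or
// degenerate buckets) that the real implementation handles:
type sketchBucket struct{ upperBound, cumulativeCount float64 }

func bucketQuantileSketch(q float64, buckets []sketchBucket) float64 {
	total := buckets[len(buckets)-1].cumulativeCount
	rank := q * total
	lowerBound, lowerCount := 0.0, 0.0
	for _, b := range buckets {
		if b.cumulativeCount >= rank {
			// Interpolate linearly between the bucket's bounds.
			frac := (rank - lowerCount) / (b.cumulativeCount - lowerCount)
			return lowerBound + (b.upperBound-lowerBound)*frac
		}
		lowerBound, lowerCount = b.upperBound, b.cumulativeCount
	}
	return buckets[len(buckets)-1].upperBound
}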
+ delete(enh.signatureToMetricWithBuckets, string(enh.lblBuf)) + continue + } + + enh.Out = append(enh.Out, Sample{ + Metric: enh.DropMetricName(sample.Metric), + F: histogramQuantile(q, sample.H), + }) } for _, mb := range enh.signatureToMetricWithBuckets { if len(mb.buckets) > 0 { enh.Out = append(enh.Out, Sample{ Metric: mb.metric, - Point: Point{V: bucketQuantile(q, mb.buckets)}, + F: bucketQuantile(q, mb.buckets), }) } } @@ -841,40 +1094,55 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev // === resets(Matrix parser.ValueTypeMatrix) Vector === func funcResets(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - samples := vals[0].(Matrix)[0] - + floats := vals[0].(Matrix)[0].Floats + histograms := vals[0].(Matrix)[0].Histograms resets := 0 - prev := samples.Points[0].V - for _, sample := range samples.Points[1:] { - current := sample.V - if current < prev { - resets++ + + if len(floats) > 1 { + prev := floats[0].F + for _, sample := range floats[1:] { + current := sample.F + if current < prev { + resets++ + } + prev = current } - prev = current } - return append(enh.Out, Sample{ - Point: Point{V: float64(resets)}, - }) + if len(histograms) > 1 { + prev := histograms[0].H + for _, sample := range histograms[1:] { + current := sample.H + if current.DetectReset(prev) { + resets++ + } + prev = current + } + } + + return append(enh.Out, Sample{F: float64(resets)}) } // === changes(Matrix parser.ValueTypeMatrix) Vector === func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { - samples := vals[0].(Matrix)[0] - + floats := vals[0].(Matrix)[0].Floats changes := 0 - prev := samples.Points[0].V - for _, sample := range samples.Points[1:] { - current := sample.V + + if len(floats) == 0 { + // TODO(beorn7): Only histogram values, still need to add support. 
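// [Editorial sketch, not part of this patch.] The float half of funcResets
// above simply counts strictly decreasing adjacent pairs; native histograms
// go through FloatHistogram.DetectReset instead, which (roughly) also flags
// a shrinking count, sum, or bucket population as a reset:
func countFloatResets(xs []float64) int {
	resets := 0
	for i := 1; i < len(xs); i++ {
		if xs[i] < xs[i-1] { // a cumulative counter only moves down on a reset
			resets++
		}
	}
	return resets
}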
+ return enh.Out + } + + prev := floats[0].F + for _, sample := range floats[1:] { + current := sample.F if current != prev && !(math.IsNaN(current) && math.IsNaN(prev)) { changes++ } prev = current } - return append(enh.Out, Sample{ - Point: Point{V: float64(changes)}, - }) + return append(enh.Out, Sample{F: float64(changes)}) } // === label_replace(Vector parser.ValueTypeVector, dst_label, replacement, src_labelname, regex parser.ValueTypeString) Vector === @@ -918,14 +1186,15 @@ func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNod if len(res) > 0 { lb.Set(dst, string(res)) } - outMetric = lb.Labels(nil) + outMetric = lb.Labels() enh.Dmn[h] = outMetric } } enh.Out = append(enh.Out, Sample{ Metric: outMetric, - Point: Point{V: el.Point.V}, + F: el.F, + H: el.H, }) } return enh.Out @@ -936,7 +1205,7 @@ func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe return append(enh.Out, Sample{ Metric: labels.Labels{}, - Point: Point{V: vals[0].(Vector)[0].V}, + F: vals[0].(Vector)[0].F, }) } @@ -986,13 +1255,14 @@ func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe lb.Set(dst, strval) } - outMetric = lb.Labels(nil) + outMetric = lb.Labels() enh.Dmn[h] = outMetric } enh.Out = append(enh.Out, Sample{ Metric: outMetric, - Point: Point{V: el.Point.V}, + F: el.F, + H: el.H, }) } return enh.Out @@ -1004,15 +1274,15 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo return append(enh.Out, Sample{ Metric: labels.Labels{}, - Point: Point{V: f(time.Unix(enh.Ts/1000, 0).UTC())}, + F: f(time.Unix(enh.Ts/1000, 0).UTC()), }) } for _, el := range vals[0].(Vector) { - t := time.Unix(int64(el.V), 0).UTC() + t := time.Unix(int64(el.F), 0).UTC() enh.Out = append(enh.Out, Sample{ Metric: enh.DropMetricName(el.Metric), - Point: Point{V: f(t)}, + F: f(t), }) } return enh.Out @@ -1103,7 +1373,10 @@ var FunctionCalls = map[string]FunctionCall{ "deriv": funcDeriv, "exp": funcExp, "floor": funcFloor, + "histogram_count": funcHistogramCount, + "histogram_fraction": funcHistogramFraction, "histogram_quantile": funcHistogramQuantile, + "histogram_sum": funcHistogramSum, "holt_winters": funcHoltWinters, "hour": funcHour, "idelta": funcIdelta, @@ -1167,10 +1440,20 @@ func (s vectorByValueHeap) Len() int { } func (s vectorByValueHeap) Less(i, j int) bool { - if math.IsNaN(s[i].V) { + // We compare histograms based on their sum of observations. + // TODO(beorn7): Is that what we want? + vi, vj := s[i].F, s[j].F + if s[i].H != nil { + vi = s[i].H.Sum + } + if s[j].H != nil { + vj = s[j].H.Sum + } + + if math.IsNaN(vi) { return true } - return s[i].V < s[j].V + return vi < vj } func (s vectorByValueHeap) Swap(i, j int) { @@ -1196,10 +1479,20 @@ func (s vectorByReverseValueHeap) Len() int { } func (s vectorByReverseValueHeap) Less(i, j int) bool { - if math.IsNaN(s[i].V) { + // We compare histograms based on their sum of observations. + // TODO(beorn7): Is that what we want? + vi, vj := s[i].F, s[j].F + if s[i].H != nil { + vi = s[i].H.Sum + } + if s[j].H != nil { + vj = s[j].H.Sum + } + + if math.IsNaN(vi) { return true } - return s[i].V > s[j].V + return vi > vj } func (s vectorByReverseValueHeap) Swap(i, j int) { @@ -1221,7 +1514,7 @@ func (s *vectorByReverseValueHeap) Pop() interface{} { // createLabelsForAbsentFunction returns the labels that are uniquely and exactly matched // in a given expression. It is used in the absent functions. 
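// [Editorial sketch, not part of this patch.] The vectorByValueHeap.Less
// above (and its reverse twin) orders NaN before every other value and, per
// the TODO, ranks a histogram sample by its Sum. The float rule in
// isolation, keeping the upstream quirk that Less reports true whenever the
// left operand is NaN (assumes "sort" and "math" are imported):
func sortNaNFirst(vs []float64) {
	sort.Slice(vs, func(i, j int) bool {
		if math.IsNaN(vs[i]) {
			return true // NaN compares as smaller than everything
		}
		return vs[i] < vs[j]
	})
}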
func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels { - m := labels.Labels{} + b := labels.NewBuilder(labels.EmptyLabels()) var lm []*labels.Matcher switch n := expr.(type) { @@ -1230,25 +1523,26 @@ func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels { case *parser.MatrixSelector: lm = n.VectorSelector.(*parser.VectorSelector).LabelMatchers default: - return m + return labels.EmptyLabels() } - empty := []string{} + // The 'has' map implements backwards-compatibility for historic behaviour: + // e.g. in `absent(x{job="a",job="b",foo="bar"})` then `job` is removed from the output. + // Note this gives arguably wrong behaviour for `absent(x{job="a",job="a",foo="bar"})`. + has := make(map[string]bool, len(lm)) for _, ma := range lm { if ma.Name == labels.MetricName { continue } - if ma.Type == labels.MatchEqual && !m.Has(ma.Name) { - m = labels.NewBuilder(m).Set(ma.Name, ma.Value).Labels(nil) + if ma.Type == labels.MatchEqual && !has[ma.Name] { + b.Set(ma.Name, ma.Value) + has[ma.Name] = true } else { - empty = append(empty, ma.Name) + b.Del(ma.Name) } } - for _, v := range empty { - m = labels.NewBuilder(m).Del(v).Labels(nil) - } - return m + return b.Labels() } func stringFromArg(e parser.Expr) string { diff --git a/promql/functions_test.go b/promql/functions_test.go index 1148b1339a..faf6859e77 100644 --- a/promql/functions_test.go +++ b/promql/functions_test.go @@ -51,20 +51,21 @@ func TestDeriv(t *testing.T) { // https://github.com/prometheus/prometheus/issues/7180 for i = 0; i < 15; i++ { jitter := 12 * i % 2 - a.Append(0, metric, int64(start+interval*i+jitter), 1) + a.Append(0, metric, start+interval*i+jitter, 1) } require.NoError(t, a.Commit()) - query, err := engine.NewInstantQuery(storage, nil, "deriv(foo[30m])", timestamp.Time(1493712846939)) + ctx := context.Background() + query, err := engine.NewInstantQuery(ctx, storage, nil, "deriv(foo[30m])", timestamp.Time(1493712846939)) require.NoError(t, err) - result := query.Exec(context.Background()) + result := query.Exec(ctx) require.NoError(t, result.Err) vec, _ := result.Vector() require.Equal(t, 1, len(vec), "Expected 1 result, got %d", len(vec)) - require.Equal(t, 0.0, vec[0].V, "Expected 0.0 as value, got %f", vec[0].V) + require.Equal(t, 0.0, vec[0].F, "Expected 0.0 as value, got %f", vec[0].F) } func TestFunctionList(t *testing.T) { diff --git a/promql/fuzz.go b/promql/fuzz.go index 39933378e6..aff6eb15b2 100644 --- a/promql/fuzz.go +++ b/promql/fuzz.go @@ -58,7 +58,7 @@ const ( ) func fuzzParseMetricWithContentType(in []byte, contentType string) int { - p, warning := textparse.New(in, contentType) + p, warning := textparse.New(in, contentType, false) if warning != nil { // An invalid content type is being passed, which should not happen // in this context. diff --git a/promql/parser/ast.go b/promql/parser/ast.go index 190af2d590..86f1394998 100644 --- a/promql/parser/ast.go +++ b/promql/parser/ast.go @@ -349,7 +349,7 @@ func (f inspector) Visit(node Node, path []Node) (Visitor, error) { // for all the non-nil children of node, recursively. func Inspect(node Node, f inspector) { //nolint: errcheck - Walk(inspector(f), node, nil) + Walk(f, node, nil) } // Children returns a list of all child nodes of a syntax tree node. 
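// [Editorial note, not part of this patch; hypothetical selectors.] The
// createLabelsForAbsentFunction rewrite above keeps only the uniquely
// occurring equality matchers; a repeated label name or a non-equality
// matcher deletes the label, matching the historic behaviour described in
// its comment:
//
//	absent(nonexistent{job="api", instance=~"prod-.*"})  =>  {job="api"}  1
//	absent(nonexistent{job="a", job="b", foo="bar"})     =>  {foo="bar"}  1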
@@ -368,13 +368,14 @@ func Children(node Node) []Node { case *AggregateExpr: // While this does not look nice, it should avoid unnecessary allocations // caused by slice resizing - if n.Expr == nil && n.Param == nil { + switch { + case n.Expr == nil && n.Param == nil: return nil - } else if n.Expr == nil { + case n.Expr == nil: return []Node{n.Param} - } else if n.Param == nil { + case n.Param == nil: return []Node{n.Expr} - } else { + default: return []Node{n.Expr, n.Param} } case *BinaryExpr: diff --git a/promql/parser/functions.go b/promql/parser/functions.go index 92afff8b23..479c7f635d 100644 --- a/promql/parser/functions.go +++ b/promql/parser/functions.go @@ -163,6 +163,21 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeVector}, ReturnType: ValueTypeVector, }, + "histogram_count": { + Name: "histogram_count", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "histogram_sum": { + Name: "histogram_sum", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "histogram_fraction": { + Name: "histogram_fraction", + ArgTypes: []ValueType{ValueTypeScalar, ValueTypeScalar, ValueTypeVector}, + ReturnType: ValueTypeVector, + }, "histogram_quantile": { Name: "histogram_quantile", ArgTypes: []ValueType{ValueTypeScalar, ValueTypeVector}, @@ -372,7 +387,7 @@ var Functions = map[string]*Function{ } // getFunction returns a predefined Function object for the given name. -func getFunction(name string) (*Function, bool) { - function, ok := Functions[name] +func getFunction(name string, functions map[string]*Function) (*Function, bool) { + function, ok := functions[name] return function, ok } diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index 433f45259c..b28e9d544c 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -16,13 +16,13 @@ package parser import ( "math" - "sort" "strconv" "time" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" ) + %} %union { @@ -32,6 +32,7 @@ import ( matcher *labels.Matcher label labels.Label labels labels.Labels + lblList []labels.Label strings []string series []SequenceValue uint uint64 @@ -138,10 +139,9 @@ START_METRIC_SELECTOR // Type definitions for grammar rules. %type label_match_list %type label_matcher - %type aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors - -%type label_set label_set_list metric +%type label_set metric +%type label_set_list %type