Mirror of https://github.com/prometheus/prometheus.git (synced 2025-01-22 19:26:56 -08:00)

Merge remote-tracking branch 'upstream/main' into sync-prom

Signed-off-by: Ganesh Vernekar <ganeshvern@gmail.com>

Commit 83d9ee3ab7
.circleci/config.yml
@@ -1,195 +1,33 @@
---
# Prometheus has switched to GitHub action.
# Circle CI is not disabled repository-wise so that previous pull requests
# continue working.
# This file does not generate any CircleCI workflow.

version: 2.1

orbs:
  prometheus: prometheus/prometheus@0.16.0
  go: circleci/go@1.7.0
  win: circleci/windows@2.3.0

executors:
  # Whenever the Go version is updated here, .promu.yml
  # should also be updated.
  golang:
    docker:
      - image: quay.io/prometheus/golang-builder:1.18-base
  golang_oldest:
    docker:
      - image: quay.io/prometheus/golang-builder:1.17-base
      - image: busybox

jobs:
  test_go:
  noopjob:
    executor: golang

    steps:
      - prometheus/setup_environment
      - go/load-cache:
          key: v1
      - run:
          command: make GO_ONLY=1
          environment:
            # Run garbage collection more aggressively to avoid getting OOMed during the lint phase.
            GOGC: "20"
            # By default Go uses GOMAXPROCS but a Circle CI executor has many
            # cores (> 30) while the CPU and RAM resources are throttled. If we
            # don't limit this to the number of allocated cores, the job is
            # likely to get OOMed and killed.
            GOOPTS: "-p 2"
            GOMAXPROCS: "2"
            GO111MODULE: "on"
      - run: go test ./tsdb/ -test.tsdb-isolation=false
      - run: make -C documentation/examples/remote_storage
      - run: make -C documentation/examples
      - prometheus/check_proto:
          version: "3.15.8"
      - prometheus/store_artifact:
          file: prometheus
      - prometheus/store_artifact:
          file: promtool
      - go/save-cache:
          key: v1
      - store_test_results:
          path: test-results

  test_ui:
    executor: golang

    steps:
      - checkout
      - restore_cache:
          keys:
            - v3-npm-deps-{{ checksum "web/ui/package-lock.json" }}
            - v3-npm-deps-
      - run: make assets-tarball
      - run: make ui-lint
      - run: make ui-test
      - persist_to_workspace:
          root: .
          paths:
            - .tarballs
      - save_cache:
          key: v3-npm-deps-{{ checksum "web/ui/package-lock.json" }}
          paths:
            - ~/.npm

  test_windows:
    executor:
      name: win/default
      shell: powershell
    working_directory: /go/src/github.com/prometheus/prometheus
    steps:
      - checkout
      - run:
          # Temporary workaround until circleci updates go.
          command: |
            choco upgrade -y golang
      - run:
          command: refreshenv
      - run:
          command: |
            $TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/discovery.*|github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"}
            go test $TestTargets -vet=off -v
          environment:
            GOGC: "20"
            GOOPTS: "-p 2"

  test_golang_oldest:
    executor: golang_oldest
    steps:
      - checkout
      - run: make build
      - run: go test ./tsdb/...
      - run: go test ./tsdb/ -test.tsdb-isolation=false

  test_mixins:
    executor: golang
    steps:
      - checkout
      - run: go install ./cmd/promtool/.
      - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest
      - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest
      - run: go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest
      - run: make -C documentation/prometheus-mixin clean
      - run: make -C documentation/prometheus-mixin jb_install
      - run: make -C documentation/prometheus-mixin
      - run: git diff --exit-code

  repo_sync:
    executor: golang
    steps:
      - checkout
      - run: ./scripts/sync_repo_files.sh
          command: "true"

workflows:
  version: 2
  prometheus:
    jobs:
      - test_go:
          filters:
            tags:
              only: /.*/
      - test_ui:
          filters:
            tags:
              only: /.*/
      - test_golang_oldest:
          filters:
            tags:
              only: /.*/
      - test_mixins:
          filters:
            tags:
              only: /.*/
      - test_windows:
          filters:
            tags:
              only: /.*/
      - prometheus/build:
          name: build
          parallelism: 3
          promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"
          filters:
            tags:
              ignore: /^v2(\.[0-9]+){2}(-.+|[^-.]*)$/
            branches:
              ignore: /^(main|release-.*|.*build-all.*)$/
      - prometheus/build:
          name: build_all
          parallelism: 12
          filters:
            branches:
              only: /^(main|release-.*|.*build-all.*)$/
            tags:
              only: /^v2(\.[0-9]+){2}(-.+|[^-.]*)$/
      - prometheus/publish_main:
          context: org-context
          requires:
            - test_go
            - test_ui
            - build_all
          filters:
            branches:
              only: main
          image: circleci/golang:1-node
      - prometheus/publish_release:
          context: org-context
          requires:
            - test_go
            - test_ui
            - build_all
          filters:
            tags:
              only: /^v2(\.[0-9]+){2}(-.+|[^-.]*)$/
            branches:
              ignore: /.*/
          image: circleci/golang:1-node
  daily:
      - noopjob
    triggers:
      - schedule:
          cron: "49 19 * * *"
          cron: "0 0 30 2 *"
          filters:
            branches:
              only:
                - main
    jobs:
      - repo_sync:
          context: org-context
74  .github/.github/ISSUE_TEMPLATE/bug_report.yml  (vendored, new file)
@@ -0,0 +1,74 @@
---
name: Bug report
description: Create a report to help us improve.
body:
  - type: markdown
    attributes:
      value: |
        Thank you for opening a bug report for Prometheus.

        Please do *NOT* ask support questions in Github issues.

        If your issue is not a feature request or bug report use our [community support](https://prometheus.io/community/).

        There is also [commercial support](https://prometheus.io/support-training/) available.
  - type: textarea
    attributes:
      label: What did you do?
      description: Please provide steps for us to reproduce this issue.
    validations:
      required: true
  - type: textarea
    attributes:
      label: What did you expect to see?
  - type: textarea
    attributes:
      label: What did you see instead? Under which circumstances?
    validations:
      required: true
  - type: markdown
    attributes:
      value: |
        ## Environment
  - type: input
    attributes:
      label: System information
      description: insert output of `uname -srm` here, or operating system version
      placeholder: e.g. Linux 5.16.15 x86_64
  - type: textarea
    attributes:
      label: Prometheus version
      description: Insert output of `prometheus --version` here.
      render: text
      placeholder: |
        e.g. prometheus, version 2.23.0 (branch: HEAD, revision: 26d89b4b0776fe4cd5a3656dfa520f119a375273)
          build user:       root@37609b3a0a21
          build date:       20201126-10:56:17
          go version:       go1.15.5
          platform:         linux/amd64
  - type: textarea
    attributes:
      label: Prometheus configuration file
      description: Insert relevant configuration here. Don't forget to remove secrets.
      render: yaml
  - type: textarea
    attributes:
      label: Alertmanager version
      description: Insert output of `alertmanager --version` here (if relevant to the issue).
      render: text
      placeholder: |
        e.g. alertmanager, version 0.22.2 (branch: HEAD, revision: 44f8adc06af5101ad64bd8b9c8b18273f2922051)
          build user:       root@b595c7f32520
          build date:       20210602-07:50:37
          go version:       go1.16.4
          platform:         linux/amd64
  - type: textarea
    attributes:
      label: Alertmanager configuration file
      description: Insert relevant configuration here. Don't forget to remove secrets.
      render: yaml
  - type: textarea
    attributes:
      label: Logs
      description: Insert Prometheus and Alertmanager logs relevant to the issue here.
      render: text
8  .github/.github/ISSUE_TEMPLATE/config.yml  (vendored, new file)
@@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
  - name: Prometheus Community Support
    url: https://prometheus.io/community/
    about: If you need help or support, please request help here.
  - name: Commercial Support & Training
    url: https://prometheus.io/support-training/
    about: If you want commercial support or training, vendors are listed here.
26  .github/.github/ISSUE_TEMPLATE/feature_request.md  (vendored, new file)
@@ -0,0 +1,26 @@
---
name: Feature request
about: Suggest an idea for this project.
title: ''
labels: ''
assignees: ''
---

<!--

Please do *NOT* ask support questions in Github issues.

If your issue is not a feature request or bug report use our
community support.

https://prometheus.io/community/

There is also commercial support available.

https://prometheus.io/support-training/

-->
## Proposal
**Use case. Why is this important?**

*“Nice to have” is not a good use case. :)*
17  .github/.github/PULL_REQUEST_TEMPLATE.md  (vendored, new file)
@@ -0,0 +1,17 @@
<!--
Don't forget!

- Please sign CNCF's Developer Certificate of Origin and sign-off your commits by adding the -s / --sign-off flag to `git commit`. See https://github.com/apps/dco for more information.

- If the PR adds or changes a behaviour or fixes a bug of an exported API it would need a unit/e2e test.

- Where possible use only exported APIs for tests to simplify the review and make it as close as possible to an actual library usage.

- No tests are needed for internal implementation changes.

- Performance improvements would need a benchmark test to prove it.

- All exposed objects should have a comment.

- All comments should start with a capital letter and end with a full stop.
-->
22  .github/.github/dependabot.yml  (vendored, new file)
@@ -0,0 +1,22 @@
version: 2
updates:
  - package-ecosystem: "gomod"
    directory: "/"
    schedule:
      interval: "monthly"
  - package-ecosystem: "gomod"
    directory: "/documentation/examples/remote_storage"
    schedule:
      interval: "monthly"
  - package-ecosystem: "npm"
    directory: "/web/ui"
    schedule:
      interval: "monthly"
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "monthly"
  - package-ecosystem: "docker"
    directory: "/"
    schedule:
      interval: "monthly"
56  .github/.github/stale.yml  (vendored, new file)
@@ -0,0 +1,56 @@
# Configuration for probot-stale - https://github.com/probot/stale

# Number of days of inactivity before an Issue or Pull Request becomes stale
daysUntilStale: 60

# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
daysUntilClose: false

# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)
onlyLabels: []

# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
exemptLabels:
  - keepalive

# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false

# Set to true to ignore issues in a milestone (defaults to false)
exemptMilestones: false

# Set to true to ignore issues with an assignee (defaults to false)
exemptAssignees: false

# Label to use when marking as stale
staleLabel: stale

# Comment to post when marking as stale. Set to `false` to disable
markComment: false

# Comment to post when removing the stale label.
# unmarkComment: >
#   Your comment here.

# Comment to post when closing a stale Issue or Pull Request.
# closeComment: >
#   Your comment here.

# Limit the number of actions per hour, from 1-30. Default is 30
limitPerRun: 30

# Limit to only `issues` or `pulls`
only: pulls

# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
# pulls:
#   daysUntilStale: 30
#   markComment: >
#     This pull request has been automatically marked as stale because it has not had
#     recent activity. It will be closed if no further activity occurs. Thank you
#     for your contributions.

# issues:
#   exemptLabels:
#     - confirmed
30  .github/.github/workflows/golangci-lint.yml  (vendored, new file)
@@ -0,0 +1,30 @@
name: golangci-lint
on:
  push:
    paths:
      - "go.sum"
      - "go.mod"
      - "**.go"
      - "scripts/errcheck_excludes.txt"
      - ".github/workflows/golangci-lint.yml"
      - ".golangci.yml"
  pull_request:

jobs:
  golangci:
    name: lint
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3
      - name: install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.18.x
      - name: Install snmp_exporter/generator dependencies
        run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
        if: github.repository == 'prometheus/snmp_exporter'
      - name: Lint
        uses: golangci/golangci-lint-action@v3.2.0
        with:
          version: v1.45.2
22  .github/.github/workflows/lock.yml  (vendored, new file)
@@ -0,0 +1,22 @@
name: 'Lock Threads'

on:
  schedule:
    - cron: '13 23 * * *'
  workflow_dispatch:

permissions:
  issues: write

concurrency:
  group: lock

jobs:
  action:
    runs-on: ubuntu-latest
    steps:
      - uses: dessant/lock-threads@v3
        with:
          process-only: 'issues'
          issue-inactive-days: '180'
          github-token: ${{ secrets.PROMBOT_LOCKTHREADS_TOKEN }}
30  .github/.github/workflows/test.yml  (vendored, new file)
@@ -0,0 +1,30 @@
name: ci
on:
  push:
    branches: [main]
  pull_request:

jobs:
  test:
    runs-on: ubuntu-20.04
    steps:
      - name: Upgrade golang
        run: |
          cd /tmp
          wget https://dl.google.com/go/go1.17.7.linux-amd64.tar.gz
          tar -zxvf go1.17.7.linux-amd64.tar.gz
          sudo rm -fr /usr/local/go
          sudo mv /tmp/go /usr/local/go
          cd -
          ls -l /usr/bin/go

      - name: Checkout Repo
        uses: actions/checkout@v2

      # This file would normally be created by `make assets`, here we just
      # mock it because the file is required for the tests to pass.
      - name: Mock building of necessary react file
        run: mkdir web/ui/static/react && touch web/ui/static/react/index.html

      - name: Run Tests
        run: GO=/usr/local/go/bin/go make common-test
44  .github/.github/workflows/ui_build_and_release.yml  (vendored, new file)
@@ -0,0 +1,44 @@
name: ui_build_and_release
on:
  pull_request:
  push:
    branches:
      - main
    tags:
      - "v0.[0-9]+.[0-9]+*"
jobs:
  release:
    name: release
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Install nodejs
        uses: actions/setup-node@v3
        with:
          node-version-file: "web/ui/.nvmrc"
      - uses: actions/cache@v3.0.4
        with:
          path: ~/.npm
          key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
          restore-keys: |
            ${{ runner.os }}-node-

      - name: Check libraries version
        ## This step is verifying that the version of each package is matching the tag
        if: ${{ github.event_name == 'push' && startsWith(github.ref_name, 'v') }}
        run: ./scripts/ui_release.sh --check-package "${{ github.ref_name }}"
      - name: build
        run: make assets
      - name: Copy files before publishing libs
        run: ./scripts/ui_release.sh --copy
      - name: Publish dry-run libraries
        if: ${{ github.event_name == 'pull_request' || github.ref_name == 'main' }}
        run: ./scripts/ui_release.sh --publish dry-run
      - name: Publish libraries
        if: ${{ github.event_name == 'push' && startsWith(github.ref_name, 'v') }}
        run: ./scripts/ui_release.sh --publish
        env:
          # The setup-node action writes an .npmrc file with this env variable
          # as the placeholder for the auth token
          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
8  .gitpod.Dockerfile  (vendored)
@@ -1,7 +1,15 @@
FROM gitpod/workspace-full

ENV CUSTOM_NODE_VERSION=16
ENV CUSTOM_GO_VERSION=1.19
ENV GOPATH=$HOME/go-packages
ENV GOROOT=$HOME/go
ENV PATH=$GOROOT/bin:$GOPATH/bin:$PATH

RUN bash -c ". .nvm/nvm.sh && nvm install ${CUSTOM_NODE_VERSION} && nvm use ${CUSTOM_NODE_VERSION} && nvm alias default ${CUSTOM_NODE_VERSION}"

RUN echo "nvm use default &>/dev/null" >> ~/.bashrc.d/51-nvm-fix
RUN curl -fsSL https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz | tar xzs \
    && printf '%s\n' 'export GOPATH=/workspace/go' \
       'export PATH=$GOPATH/bin:$PATH' > $HOME/.bashrc.d/300-go
.promu.yml
@@ -1,7 +1,7 @@
go:
  # Whenever the Go version is updated here,
  # .circle/config.yml should also be updated.
  version: 1.18
  version: 1.19
repository:
  path: github.com/prometheus/prometheus
build:
40  CHANGELOG.md
@@ -1,6 +1,43 @@
# Changelog

## 2.37.0-rc.0 / 2022-07-05
## 2.39.0 / 2022-10-05

* [FEATURE] **experimental** TSDB: Add support for ingesting out-of-order samples. This is configured via `out_of_order_time_window` field in the config file; check config file docs for more info. #11075
* [ENHANCEMENT] API: `/-/healthy` and `/-/ready` API calls now also respond to a `HEAD` request on top of existing `GET` support. #11160
* [ENHANCEMENT] PuppetDB SD: Add `__meta_puppetdb_query` label. #11238
* [ENHANCEMENT] AWS EC2 SD: Add `__meta_ec2_region` label. #11326
* [ENHANCEMENT] AWS Lightsail SD: Add `__meta_lightsail_region` label. #11326
* [ENHANCEMENT] Scrape: Optimise relabeling by re-using memory. #11147
* [ENHANCEMENT] TSDB: Improve WAL replay timings. #10973 #11307 #11319
* [ENHANCEMENT] TSDB: Optimise memory by not storing unnecessary data in the memory. #11280 #11288 #11296
* [ENHANCEMENT] TSDB: Allow overlapping blocks by default. `--storage.tsdb.allow-overlapping-blocks` now has no effect. #11331
* [ENHANCEMENT] UI: Click to copy label-value pair from query result to clipboard. #11229
* [BUGFIX] TSDB: Turn off isolation for Head compaction to fix a memory leak. #11317
* [BUGFIX] TSDB: Fix 'invalid magic number 0' error on Prometheus startup. #11338
* [BUGFIX] PromQL: Properly close file descriptor when logging unfinished queries. #11148
* [BUGFIX] Agent: Fix validation of flag options and prevent WAL from growing more than desired. #9876

## 2.38.0 / 2022-08-16

* [FEATURE]: Web: Add a `/api/v1/format_query` HTTP API endpoint that allows pretty-formatting PromQL expressions. #11036 #10544 #11005
* [FEATURE]: UI: Add support for formatting PromQL expressions in the UI. #11039
* [FEATURE]: DNS SD: Support MX records for discovering targets. #10099
* [FEATURE]: Templates: Add `toTime()` template function that allows converting sample timestamps to Go `time.Time` values. #10993
* [ENHANCEMENT]: Kubernetes SD: Add `__meta_kubernetes_service_port_number` meta label indicating the service port number. #11002 #11053
* [ENHANCEMENT]: Kubernetes SD: Add `__meta_kubernetes_pod_container_image` meta label indicating the container image. #11034 #11146
* [ENHANCEMENT]: PromQL: When a query panics, also log the query itself alongside the panic message. #10995
* [ENHANCEMENT]: UI: Tweak colors in the dark theme to improve the contrast ratio. #11068
* [ENHANCEMENT]: Web: Speed up calls to `/api/v1/rules` by avoiding locks and using atomic types instead. #10858
* [ENHANCEMENT]: Scrape: Add a `no-default-scrape-port` feature flag, which omits or removes any default HTTP (`:80`) or HTTPS (`:443`) ports in the target's scrape address. #9523
* [BUGFIX]: TSDB: In the WAL watcher metrics, expose the `type="exemplar"` label instead of `type="unknown"` for exemplar records. #11008
* [BUGFIX]: TSDB: Fix race condition around allocating series IDs during chunk snapshot loading. #11099

## 2.37.0 / 2022-07-14

This release is a LTS (Long-Term Support) release of Prometheus and will
receive security, documentation and bugfix patches for at least 6 months.
Please read more about our LTS release cycle at
<https://prometheus.io/docs/introduction/release-cycle/>.

Following data loss by users due to lack of unified buffer cache in OpenBSD, we
will no longer release Prometheus upstream for OpenBSD until a proper solution is

@@ -11,6 +48,7 @@ found. #8799
* [ENHANCEMENT] PromQL: Optimise creation of signature with/without labels. #10667
* [ENHANCEMENT] TSDB: Memory optimizations. #10873 #10874
* [ENHANCEMENT] TSDB: Reduce sleep time when reading WAL. #10859 #10878
* [ENHANCEMENT] OAuth2: Add appropriate timeouts and User-Agent header. #11020
* [BUGFIX] Alerting: Fix Alertmanager targets not being updated when alerts were queued. #10948
* [BUGFIX] Hetzner SD: Make authentication files relative to Prometheus config file. #10813
* [BUGFIX] Promtool: Fix `promtool check config` not erroring properly on failures. #10952
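The out-of-order ingestion feature listed under 2.39.0 is enabled through the new `out_of_order_time_window` setting (the same key this commit adds to config.go and config/testdata/conf.good.yml further down). An illustrative sketch, not part of this commit, with a placeholder one-hour window:

    # Hypothetical prometheus.yml fragment: accept samples up to one hour
    # older than the newest sample of each series (experimental in 2.39.0).
    storage:
      tsdb:
        out_of_order_time_window: 1h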
Makefile.common
@@ -58,16 +58,19 @@ endif
PROMU_VERSION ?= 0.13.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz

SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.45.2
GOLANGCI_LINT_VERSION ?= v1.49.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
    ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
        # If we're in CI and there is an Actions file, that means the linter
        # is being run in Actions, so we don't need to run it here.
        ifeq (,$(CIRCLE_JOB))
        ifneq (,$(SKIP_GOLANGCI_LINT))
            GOLANGCI_LINT :=
        else ifeq (,$(CIRCLE_JOB))
            GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
        else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
            GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
RELEASE.md
@@ -42,8 +42,10 @@ Release cadence of first pre-releases being cut is 6 weeks.
| v2.35 | 2022-04-06 | Augustin Husson (GitHub: @nexucis) |
| v2.36 | 2022-05-18 | Matthias Loibl (GitHub: @metalmatze) |
| v2.37 LTS | 2022-06-29 | Julien Pivotto (GitHub: @roidelapluie) |
| v2.38 | 2022-08-10 | **searching for volunteer** |
| v2.39 | 2022-09-21 | **searching for volunteer** |
| v2.38 | 2022-08-10 | Julius Volz (GitHub: @juliusv) |
| v2.39 | 2022-09-21 | Ganesh Vernekar (GitHub: @codesome) |
| v2.40 | 2022-11-02 | **searching for volunteer** |
| v2.41 | 2022-12-14 | **searching for volunteer** |

If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.

@@ -139,6 +141,7 @@ For release candidates still update `CHANGELOG.md`, but when you cut the final r

Entries in the `CHANGELOG.md` are meant to be in this order:

* `[SECURITY]` - A bugfix that specifically fixes a security issue.
* `[CHANGE]`
* `[FEATURE]`
* `[ENHANCEMENT]`
cmd/prometheus/main.go
@@ -37,7 +37,7 @@ import (
    "github.com/go-kit/log"
    "github.com/go-kit/log/level"
    "github.com/grafana/regexp"
    conntrack "github.com/mwitkow/go-conntrack"
    "github.com/mwitkow/go-conntrack"
    "github.com/oklog/run"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/common/model"

@@ -48,8 +48,8 @@ import (
    toolkit_webflag "github.com/prometheus/exporter-toolkit/web/kingpinflag"
    "go.uber.org/atomic"
    "go.uber.org/automaxprocs/maxprocs"
    kingpin "gopkg.in/alecthomas/kingpin.v2"
    klog "k8s.io/klog"
    "gopkg.in/alecthomas/kingpin.v2"
    "k8s.io/klog"
    klogv2 "k8s.io/klog/v2"

    "github.com/prometheus/prometheus/config"

@@ -58,6 +58,7 @@ import (
    "github.com/prometheus/prometheus/discovery/targetgroup"
    "github.com/prometheus/prometheus/model/exemplar"
    "github.com/prometheus/prometheus/model/labels"
    "github.com/prometheus/prometheus/model/metadata"
    "github.com/prometheus/prometheus/model/relabel"
    "github.com/prometheus/prometheus/notifier"
    _ "github.com/prometheus/prometheus/plugins" // Register plugins.

@@ -178,7 +179,7 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
        level.Info(logger).Log("msg", "Experimental memory snapshot on shutdown enabled")
    case "extra-scrape-metrics":
        c.scrape.ExtraMetrics = true
        level.Info(logger).Log("msg", "Experimental additional scrape metrics")
        level.Info(logger).Log("msg", "Experimental additional scrape metrics enabled")
    case "new-service-discovery-manager":
        c.enableNewSDManager = true
        level.Info(logger).Log("msg", "Experimental service discovery manager")

@@ -191,6 +192,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
    case "auto-gomaxprocs":
        c.enableAutoGOMAXPROCS = true
        level.Info(logger).Log("msg", "Automatically set GOMAXPROCS to match Linux container CPU quota")
    case "no-default-scrape-port":
        c.scrape.NoDefaultPort = true
        level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.")
    case "":
        continue
    case "promql-at-modifier", "promql-negative-offset":

@@ -308,8 +312,10 @@ func main() {
    serverOnlyFlag(a, "storage.tsdb.no-lockfile", "Do not create lockfile in data directory.").
        Default("false").BoolVar(&cfg.tsdb.NoLockfile)

    serverOnlyFlag(a, "storage.tsdb.allow-overlapping-blocks", "Allow overlapping blocks, which in turn enables vertical compaction and vertical query merge.").
        Default("false").BoolVar(&cfg.tsdb.AllowOverlappingBlocks)
    // TODO: Remove in Prometheus 3.0.
    var b bool
    serverOnlyFlag(a, "storage.tsdb.allow-overlapping-blocks", "[DEPRECATED] This flag has no effect. Overlapping blocks are enabled by default now.").
        Default("true").Hidden().BoolVar(&b)

    serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL.").
        Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression)

@@ -390,7 +396,7 @@ func main() {
    a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
        Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)

    a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
    a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
        Default("").StringsVar(&cfg.featureList)

    promlogflag.AddFlags(a, &cfg.promlogConfig)

@@ -1004,7 +1010,6 @@ func main() {
        "NoLockfile", cfg.tsdb.NoLockfile,
        "RetentionDuration", cfg.tsdb.RetentionDuration,
        "WALSegmentSize", cfg.tsdb.WALSegmentSize,
        "AllowOverlappingBlocks", cfg.tsdb.AllowOverlappingBlocks,
        "WALCompression", cfg.tsdb.WALCompression,
    )

@@ -1406,6 +1411,10 @@ func (n notReadyAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels,
    return 0, tsdb.ErrNotReady
}

func (n notReadyAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
    return 0, tsdb.ErrNotReady
}

func (n notReadyAppender) Commit() error { return tsdb.ErrNotReady }

func (n notReadyAppender) Rollback() error { return tsdb.ErrNotReady }

@@ -1523,7 +1532,6 @@ type tsdbOptions struct {
    RetentionDuration        model.Duration
    MaxBytes                 units.Base2Bytes
    NoLockfile               bool
    AllowOverlappingBlocks   bool
    WALCompression           bool
    HeadChunksWriteQueueSize int
    StripeSize               int

@@ -1542,8 +1550,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
    RetentionDuration:          int64(time.Duration(opts.RetentionDuration) / time.Millisecond),
    MaxBytes:                   int64(opts.MaxBytes),
    NoLockfile:                 opts.NoLockfile,
    AllowOverlappingCompaction: opts.AllowOverlappingBlocks,
    AllowOverlappingQueries:    opts.AllowOverlappingBlocks,
    AllowOverlappingCompaction: true,
    WALCompression:             opts.WALCompression,
    HeadChunksWriteQueueSize:   opts.HeadChunksWriteQueueSize,
    StripeSize:                 opts.StripeSize,
cmd/prometheus/main_test.go
@@ -38,6 +38,8 @@ import (
    "github.com/prometheus/prometheus/rules"
)

const startupTime = 10 * time.Second

var (
    promPath   = os.Args[0]
    promConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus.yml")

@@ -145,8 +147,8 @@ func TestSendAlerts(t *testing.T) {
    {
        in: []*rules.Alert{
            {
                Labels:      []labels.Label{{Name: "l1", Value: "v1"}},
                Annotations: []labels.Label{{Name: "a2", Value: "v2"}},
                Labels:      labels.FromStrings("l1", "v1"),
                Annotations: labels.FromStrings("a2", "v2"),
                ActiveAt:    time.Unix(1, 0),
                FiredAt:     time.Unix(2, 0),
                ValidUntil:  time.Unix(3, 0),

@@ -154,8 +156,8 @@ func TestSendAlerts(t *testing.T) {
        },
        exp: []*notifier.Alert{
            {
                Labels:       []labels.Label{{Name: "l1", Value: "v1"}},
                Annotations:  []labels.Label{{Name: "a2", Value: "v2"}},
                Labels:       labels.FromStrings("l1", "v1"),
                Annotations:  labels.FromStrings("a2", "v2"),
                StartsAt:     time.Unix(2, 0),
                EndsAt:       time.Unix(3, 0),
                GeneratorURL: "http://localhost:9090/graph?g0.expr=up&g0.tab=1",

@@ -165,8 +167,8 @@ func TestSendAlerts(t *testing.T) {
    {
        in: []*rules.Alert{
            {
                Labels:      []labels.Label{{Name: "l1", Value: "v1"}},
                Annotations: []labels.Label{{Name: "a2", Value: "v2"}},
                Labels:      labels.FromStrings("l1", "v1"),
                Annotations: labels.FromStrings("a2", "v2"),
                ActiveAt:    time.Unix(1, 0),
                FiredAt:     time.Unix(2, 0),
                ResolvedAt:  time.Unix(4, 0),

@@ -174,8 +176,8 @@ func TestSendAlerts(t *testing.T) {
        },
        exp: []*notifier.Alert{
            {
                Labels:       []labels.Label{{Name: "l1", Value: "v1"}},
                Annotations:  []labels.Label{{Name: "a2", Value: "v2"}},
                Labels:       labels.FromStrings("l1", "v1"),
                Annotations:  labels.FromStrings("a2", "v2"),
                StartsAt:     time.Unix(2, 0),
                EndsAt:       time.Unix(4, 0),
                GeneratorURL: "http://localhost:9090/graph?g0.expr=up&g0.tab=1",

@@ -226,7 +228,7 @@ func TestWALSegmentSizeBounds(t *testing.T) {
    select {
    case err := <-done:
        t.Errorf("prometheus should be still running: %v", err)
    case <-time.After(5 * time.Second):
    case <-time.After(startupTime):
        prom.Process.Kill()
        <-done
    }

@@ -272,7 +274,7 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
    select {
    case err := <-done:
        t.Errorf("prometheus should be still running: %v", err)
    case <-time.After(5 * time.Second):
    case <-time.After(startupTime):
        prom.Process.Kill()
        <-done
    }

@@ -366,7 +368,7 @@ func TestAgentSuccessfulStartup(t *testing.T) {
    case err := <-done:
        t.Logf("prometheus agent should be still running: %v", err)
        actualExitStatus = prom.ProcessState.ExitCode()
    case <-time.After(5 * time.Second):
    case <-time.After(startupTime):
        prom.Process.Kill()
    }
    require.Equal(t, 0, actualExitStatus)

@@ -387,7 +389,7 @@ func TestAgentFailedStartupWithServerFlag(t *testing.T) {
    case err := <-done:
        t.Logf("prometheus agent should not be running: %v", err)
        actualExitStatus = prom.ProcessState.ExitCode()
    case <-time.After(5 * time.Second):
    case <-time.After(startupTime):
        prom.Process.Kill()
    }

@@ -411,7 +413,7 @@ func TestAgentFailedStartupWithInvalidConfig(t *testing.T) {
    case err := <-done:
        t.Logf("prometheus agent should not be running: %v", err)
        actualExitStatus = prom.ProcessState.ExitCode()
    case <-time.After(5 * time.Second):
    case <-time.After(startupTime):
        prom.Process.Kill()
    }
    require.Equal(t, 2, actualExitStatus)

@@ -462,7 +464,7 @@ func TestModeSpecificFlags(t *testing.T) {
    select {
    case err := <-done:
        t.Errorf("prometheus should be still running: %v", err)
    case <-time.After(5 * time.Second):
    case <-time.After(startupTime):
        prom.Process.Kill()
        <-done
    }
cmd/promtool/main.go
@@ -42,7 +42,7 @@ import (
    "github.com/prometheus/common/version"
    "github.com/prometheus/exporter-toolkit/web"
    "gopkg.in/alecthomas/kingpin.v2"
    yaml "gopkg.in/yaml.v2"
    "gopkg.in/yaml.v2"

    dto "github.com/prometheus/client_model/go"
    "github.com/prometheus/common/expfmt"

@@ -211,7 +211,7 @@ func main() {
    "A list of one or more files containing recording rules to be backfilled. All recording rules listed in the files will be backfilled. Alerting rules are not evaluated.",
    ).Required().ExistingFiles()

    featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings()
    featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings()

    parsedCmd := kingpin.MustParse(app.Parse(os.Args[1:]))

@@ -223,10 +223,13 @@ func main() {
        p = &promqlPrinter{}
    }

    var noDefaultScrapePort bool
    for _, f := range *featureList {
        opts := strings.Split(f, ",")
        for _, o := range opts {
            switch o {
            case "no-default-scrape-port":
                noDefaultScrapePort = true
            case "":
                continue
            case "promql-at-modifier", "promql-negative-offset":

@@ -239,7 +242,7 @@ func main() {

    switch parsedCmd {
    case sdCheckCmd.FullCommand():
        os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout))
        os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, noDefaultScrapePort))

    case checkConfigCmd.FullCommand():
        os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...))

@@ -1217,7 +1220,7 @@ func checkTargetGroupsForAlertmanager(targetGroups []*targetgroup.Group, amcfg *

func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *config.ScrapeConfig) error {
    for _, tg := range targetGroups {
        _, failures := scrape.TargetsFromGroup(tg, scfg)
        _, failures := scrape.TargetsFromGroup(tg, scfg, false)
        if len(failures) > 0 {
            first := failures[0]
            return first
cmd/promtool/rules.go
@@ -34,7 +34,7 @@ import (
const maxSamplesInMemory = 5000

type queryRangeAPI interface {
    QueryRange(ctx context.Context, query string, r v1.Range) (model.Value, v1.Warnings, error)
    QueryRange(ctx context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error)
}

type ruleImporter struct {

@@ -165,7 +165,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
    lb.Set(labels.MetricName, ruleName)

    for _, value := range sample.Values {
        if err := app.add(ctx, lb.Labels(), timestamp.FromTime(value.Timestamp.Time()), float64(value.Value)); err != nil {
        if err := app.add(ctx, lb.Labels(nil), timestamp.FromTime(value.Timestamp.Time()), float64(value.Value)); err != nil {
            return fmt.Errorf("add: %w", err)
        }
    }

cmd/promtool/rules_test.go
@@ -34,7 +34,7 @@ type mockQueryRangeAPI struct {
    samples model.Matrix
}

func (mockAPI mockQueryRangeAPI) QueryRange(ctx context.Context, query string, r v1.Range) (model.Value, v1.Warnings, error) {
func (mockAPI mockQueryRangeAPI) QueryRange(ctx context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) {
    return mockAPI.samples, v1.Warnings{}, nil
}

@@ -117,8 +117,6 @@ func TestBackfillRuleIntegration(t *testing.T) {
    }

    opts := tsdb.DefaultOptions()
    opts.AllowOverlappingQueries = true
    opts.AllowOverlappingCompaction = true
    db, err := tsdb.Open(tmpDir, nil, nil, opts, nil)
    require.NoError(t, err)

@@ -135,10 +133,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
    series := selectedSeries.At()
    if len(series.Labels()) != 3 {
        require.Equal(t, 2, len(series.Labels()))
        x := labels.Labels{
            labels.Label{Name: "__name__", Value: "grp2_rule1"},
            labels.Label{Name: "name1", Value: "val1"},
        }
        x := labels.FromStrings("__name__", "grp2_rule1", "name1", "val1")
        require.Equal(t, x, series.Labels())
    } else {
        require.Equal(t, 3, len(series.Labels()))

@@ -249,8 +244,6 @@ func TestBackfillLabels(t *testing.T) {
    }

    opts := tsdb.DefaultOptions()
    opts.AllowOverlappingQueries = true
    opts.AllowOverlappingCompaction = true
    db, err := tsdb.Open(tmpDir, nil, nil, opts, nil)
    require.NoError(t, err)

@@ -261,10 +254,7 @@ func TestBackfillLabels(t *testing.T) {
    selectedSeries := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
    for selectedSeries.Next() {
        series := selectedSeries.At()
        expectedLabels := labels.Labels{
            labels.Label{Name: "__name__", Value: "rulename"},
            labels.Label{Name: "name1", Value: "value-from-rule"},
        }
        expectedLabels := labels.FromStrings("__name__", "rulename", "name1", "value-from-rule")
        require.Equal(t, expectedLabels, series.Labels())
    }
    require.NoError(t, selectedSeries.Err())
cmd/promtool/sd.go
@@ -37,7 +37,7 @@ type sdCheckResult struct {
}

// CheckSD performs service discovery for the given job name and reports the results.
func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration) int {
func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefaultScrapePort bool) int {
    logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

    cfg, err := config.LoadFile(sdConfigFiles, false, false, logger)

@@ -94,7 +94,7 @@ outerLoop:
    }
    results := []sdCheckResult{}
    for _, tgs := range sdCheckResults {
        results = append(results, getSDCheckResult(tgs, scrapeConfig)...)
        results = append(results, getSDCheckResult(tgs, scrapeConfig, noDefaultScrapePort)...)
    }

    res, err := json.MarshalIndent(results, "", " ")

@@ -107,7 +107,7 @@ outerLoop:
    return successExitCode
}

func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig) []sdCheckResult {
func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig, noDefaultScrapePort bool) []sdCheckResult {
    sdCheckResults := []sdCheckResult{}
    for _, targetGroup := range targetGroups {
        for _, target := range targetGroup.Targets {

@@ -124,7 +124,7 @@ func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.Sc
    }

    targetLabels := labels.New(labelSlice...)
    res, orig, err := scrape.PopulateLabels(targetLabels, scrapeConfig)
    res, orig, err := scrape.PopulateLabels(targetLabels, scrapeConfig, noDefaultScrapePort)
    result := sdCheckResult{
        DiscoveredLabels: orig,
        Labels:           res,

cmd/promtool/sd_test.go
@@ -51,23 +51,23 @@ func TestSDCheckResult(t *testing.T) {

    expectedSDCheckResult := []sdCheckResult{
        {
            DiscoveredLabels: labels.Labels{
                labels.Label{Name: "__address__", Value: "localhost:8080"},
                labels.Label{Name: "__scrape_interval__", Value: "1m"},
                labels.Label{Name: "__scrape_timeout__", Value: "10s"},
                labels.Label{Name: "foo", Value: "bar"},
            },
            Labels: labels.Labels{
                labels.Label{Name: "__address__", Value: "localhost:8080"},
                labels.Label{Name: "__scrape_interval__", Value: "1m"},
                labels.Label{Name: "__scrape_timeout__", Value: "10s"},
                labels.Label{Name: "foo", Value: "bar"},
                labels.Label{Name: "instance", Value: "localhost:8080"},
                labels.Label{Name: "newfoo", Value: "bar"},
            },
            DiscoveredLabels: labels.FromStrings(
                "__address__", "localhost:8080",
                "__scrape_interval__", "1m",
                "__scrape_timeout__", "10s",
                "foo", "bar",
            ),
            Labels: labels.FromStrings(
                "__address__", "localhost:8080",
                "__scrape_interval__", "1m",
                "__scrape_timeout__", "10s",
                "foo", "bar",
                "instance", "localhost:8080",
                "newfoo", "bar",
            ),
            Error: nil,
        },
    }

    require.Equal(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig))
    require.Equal(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig, true))
}
@@ -27,7 +27,7 @@ import (

    "github.com/go-kit/log"
    "github.com/prometheus/common/model"
    yaml "gopkg.in/yaml.v2"
    "gopkg.in/yaml.v2"

    "github.com/prometheus/prometheus/model/labels"
    "github.com/prometheus/prometheus/promql"

config/config.go
@@ -29,7 +29,7 @@ import (
    "github.com/prometheus/common/config"
    "github.com/prometheus/common/model"
    "github.com/prometheus/common/sigv4"
    yaml "gopkg.in/yaml.v2"
    "gopkg.in/yaml.v2"

    "github.com/prometheus/prometheus/discovery"
    "github.com/prometheus/prometheus/model/labels"

@@ -508,8 +508,9 @@ type StorageConfig struct {
// TSDBConfig configures runtime reloadable configuration options.
type TSDBConfig struct {
    // OutOfOrderTimeWindow sets how long back in time an out-of-order sample can be inserted
    // into the TSDB. This is the one finally used by the TSDB and should be in the same unit
    // as other timestamps in the TSDB.
    // into the TSDB. This flag is typically set while unmarshaling the configuration file and translating
    // OutOfOrderTimeWindowFlag's duration. The unit of this flag is expected to be the same as any
    // other timestamp in the TSDB.
    OutOfOrderTimeWindow int64

    // OutOfOrderTimeWindowFlag holds the parsed duration from the config file.

config/config_test.go
@@ -74,10 +74,7 @@ var expectedConf = &Config{
    EvaluationInterval: model.Duration(30 * time.Second),
    QueryLogFile:       "",

    ExternalLabels: labels.Labels{
        {Name: "foo", Value: "bar"},
        {Name: "monitor", Value: "codelab"},
    },
    ExternalLabels: labels.FromStrings("foo", "bar", "monitor", "codelab"),
    },

    RuleFiles: []string{

@@ -1098,6 +1095,12 @@ var expectedConf = &Config{
            },
        },
    },
    StorageConfig: StorageConfig{
        TSDBConfig: &TSDBConfig{
            OutOfOrderTimeWindow:     30 * time.Minute.Milliseconds(),
            OutOfOrderTimeWindowFlag: model.Duration(30 * time.Minute),
        },
    },
    TracingConfig: TracingConfig{
        Endpoint:   "localhost:4317",
        ClientType: TracingClientGRPC,

@@ -1655,28 +1658,16 @@ func TestExpandExternalLabels(t *testing.T) {

    c, err := LoadFile("testdata/external_labels.good.yml", false, false, log.NewNopLogger())
    require.NoError(t, err)
    require.Equal(t, labels.Label{Name: "bar", Value: "foo"}, c.GlobalConfig.ExternalLabels[0])
    require.Equal(t, labels.Label{Name: "baz", Value: "foo${TEST}bar"}, c.GlobalConfig.ExternalLabels[1])
    require.Equal(t, labels.Label{Name: "foo", Value: "${TEST}"}, c.GlobalConfig.ExternalLabels[2])
    require.Equal(t, labels.Label{Name: "qux", Value: "foo$${TEST}"}, c.GlobalConfig.ExternalLabels[3])
    require.Equal(t, labels.Label{Name: "xyz", Value: "foo$$bar"}, c.GlobalConfig.ExternalLabels[4])
    require.Equal(t, labels.FromStrings("bar", "foo", "baz", "foo${TEST}bar", "foo", "${TEST}", "qux", "foo$${TEST}", "xyz", "foo$$bar"), c.GlobalConfig.ExternalLabels)

    c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger())
    require.NoError(t, err)
    require.Equal(t, labels.Label{Name: "bar", Value: "foo"}, c.GlobalConfig.ExternalLabels[0])
    require.Equal(t, labels.Label{Name: "baz", Value: "foobar"}, c.GlobalConfig.ExternalLabels[1])
    require.Equal(t, labels.Label{Name: "foo", Value: ""}, c.GlobalConfig.ExternalLabels[2])
    require.Equal(t, labels.Label{Name: "qux", Value: "foo${TEST}"}, c.GlobalConfig.ExternalLabels[3])
    require.Equal(t, labels.Label{Name: "xyz", Value: "foo$bar"}, c.GlobalConfig.ExternalLabels[4])
    require.Equal(t, labels.FromStrings("bar", "foo", "baz", "foobar", "foo", "", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)

    os.Setenv("TEST", "TestValue")
    c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger())
    require.NoError(t, err)
    require.Equal(t, labels.Label{Name: "bar", Value: "foo"}, c.GlobalConfig.ExternalLabels[0])
    require.Equal(t, labels.Label{Name: "baz", Value: "fooTestValuebar"}, c.GlobalConfig.ExternalLabels[1])
    require.Equal(t, labels.Label{Name: "foo", Value: "TestValue"}, c.GlobalConfig.ExternalLabels[2])
    require.Equal(t, labels.Label{Name: "qux", Value: "foo${TEST}"}, c.GlobalConfig.ExternalLabels[3])
    require.Equal(t, labels.Label{Name: "xyz", Value: "foo$bar"}, c.GlobalConfig.ExternalLabels[4])
    require.Equal(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
}

func TestEmptyGlobalBlock(t *testing.T) {
4  config/testdata/conf.good.yml  (vendored)
@@ -391,6 +391,10 @@ alerting:
      - "1.2.3.5:9093"
      - "1.2.3.6:9093"

storage:
  tsdb:
    out_of_order_time_window: 30m

tracing:
  endpoint: "localhost:4317"
  client_type: "grpc"
@ -57,6 +57,7 @@ const (
|
|||
ec2LabelPrivateIP = ec2Label + "private_ip"
|
||||
ec2LabelPublicDNS = ec2Label + "public_dns_name"
|
||||
ec2LabelPublicIP = ec2Label + "public_ip"
|
||||
ec2LabelRegion = ec2Label + "region"
|
||||
ec2LabelSubnetID = ec2Label + "subnet_id"
|
||||
ec2LabelTag = ec2Label + "tag_"
|
||||
ec2LabelVPCID = ec2Label + "vpc_id"
|
||||
|
@ -242,6 +243,7 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
|
|||
|
||||
labels := model.LabelSet{
|
||||
ec2LabelInstanceID: model.LabelValue(*inst.InstanceId),
|
||||
ec2LabelRegion: model.LabelValue(d.cfg.Region),
|
||||
}
|
||||
|
||||
if r.OwnerId != nil {
|
||||
|
|
|
@ -49,6 +49,7 @@ const (
|
|||
lightsailLabelIPv6Addresses = lightsailLabel + "ipv6_addresses"
|
||||
lightsailLabelPrivateIP = lightsailLabel + "private_ip"
|
||||
lightsailLabelPublicIP = lightsailLabel + "public_ip"
|
||||
lightsailLabelRegion = lightsailLabel + "region"
|
||||
lightsailLabelTag = lightsailLabel + "tag_"
|
||||
lightsailLabelSeparator = ","
|
||||
)
|
||||
|
@ -199,6 +200,7 @@ func (d *LightsailDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
|
|||
lightsailLabelInstanceState: model.LabelValue(*inst.State.Name),
|
||||
lightsailLabelInstanceSupportCode: model.LabelValue(*inst.SupportCode),
|
||||
lightsailLabelPrivateIP: model.LabelValue(*inst.PrivateIpAddress),
|
||||
lightsailLabelRegion: model.LabelValue(d.cfg.Region),
|
||||
}
|
||||
|
||||
addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port))
|
||||
|
|
|
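The new `__meta_ec2_region` and `__meta_lightsail_region` labels added above can be turned into regular target labels with a relabel rule. An illustrative sketch, not part of this commit; the job name and region value are placeholders:

    # Hypothetical scrape config copying the discovered region onto targets.
    scrape_configs:
      - job_name: "ec2-nodes"
        ec2_sd_configs:
          - region: eu-west-1
        relabel_configs:
          - source_labels: [__meta_ec2_region]
            target_label: region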
@ -40,6 +40,8 @@ const (
|
|||
dnsSrvRecordPrefix = model.MetaLabelPrefix + "dns_srv_record_"
|
||||
dnsSrvRecordTargetLabel = dnsSrvRecordPrefix + "target"
|
||||
dnsSrvRecordPortLabel = dnsSrvRecordPrefix + "port"
|
||||
dnsMxRecordPrefix = model.MetaLabelPrefix + "dns_mx_record_"
|
||||
dnsMxRecordTargetLabel = dnsMxRecordPrefix + "target"
|
||||
|
||||
// Constants for instrumentation.
|
||||
namespace = "prometheus"
|
||||
|
@ -100,7 +102,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
|||
}
|
||||
switch strings.ToUpper(c.Type) {
|
||||
case "SRV":
|
||||
case "A", "AAAA":
|
||||
case "A", "AAAA", "MX":
|
||||
if c.Port == 0 {
|
||||
return errors.New("a port is required in DNS-SD configs for all record types except SRV")
|
||||
}
|
||||
|
@ -136,6 +138,8 @@ func NewDiscovery(conf SDConfig, logger log.Logger) *Discovery {
|
|||
qtype = dns.TypeAAAA
|
||||
case "SRV":
|
||||
qtype = dns.TypeSRV
|
||||
case "MX":
|
||||
qtype = dns.TypeMX
|
||||
}
|
||||
d := &Discovery{
|
||||
names: conf.Names,
|
||||
|
@ -195,7 +199,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
|
|||
}
|
||||
|
||||
for _, record := range response.Answer {
|
||||
var target, dnsSrvRecordTarget, dnsSrvRecordPort model.LabelValue
|
||||
var target, dnsSrvRecordTarget, dnsSrvRecordPort, dnsMxRecordTarget model.LabelValue
|
||||
|
||||
switch addr := record.(type) {
|
||||
case *dns.SRV:
|
||||
|
@ -206,6 +210,13 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
|
|||
addr.Target = strings.TrimRight(addr.Target, ".")
|
||||
|
||||
target = hostPort(addr.Target, int(addr.Port))
|
||||
case *dns.MX:
|
||||
dnsMxRecordTarget = model.LabelValue(addr.Mx)
|
||||
|
||||
// Remove the final dot from rooted DNS names to make them look more usual.
|
||||
addr.Mx = strings.TrimRight(addr.Mx, ".")
|
||||
|
||||
target = hostPort(addr.Mx, d.port)
|
||||
case *dns.A:
|
||||
target = hostPort(addr.A.String(), d.port)
|
||||
case *dns.AAAA:
|
||||
|
@ -222,6 +233,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
|
|||
dnsNameLabel: model.LabelValue(name),
|
||||
dnsSrvRecordTargetLabel: dnsSrvRecordTarget,
|
||||
dnsSrvRecordPortLabel: dnsSrvRecordPort,
|
||||
dnsMxRecordTargetLabel: dnsMxRecordTarget,
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -240,22 +252,22 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
|
|||
//
|
||||
// There are three possible outcomes:
|
||||
//
|
||||
// 1. One of the permutations of the given name is recognized as
|
||||
// "valid" by the DNS, in which case we consider ourselves "done"
|
||||
// and that answer is returned. Note that, due to the way the DNS
|
||||
// handles "name has resource records, but none of the specified type",
|
||||
// the answer received may have an empty set of results.
|
||||
// 1. One of the permutations of the given name is recognized as
|
||||
// "valid" by the DNS, in which case we consider ourselves "done"
|
||||
// and that answer is returned. Note that, due to the way the DNS
|
||||
// handles "name has resource records, but none of the specified type",
|
||||
// the answer received may have an empty set of results.
|
||||
//
|
||||
// 2. All of the permutations of the given name are responded to by one of
|
||||
// the servers in the "nameservers" list with the answer "that name does
|
||||
// not exist" (NXDOMAIN). In that case, it can be considered
|
||||
// pseudo-authoritative that there are no records for that name.
|
||||
// 2. All of the permutations of the given name are responded to by one of
|
||||
// the servers in the "nameservers" list with the answer "that name does
|
||||
// not exist" (NXDOMAIN). In that case, it can be considered
|
||||
// pseudo-authoritative that there are no records for that name.
|
||||
//
|
||||
// 3. One or more of the names was responded to by all servers with some
|
||||
// sort of error indication. In that case, we can't know if, in fact,
|
||||
// there are records for the name or not, so whatever state the
|
||||
// configuration is in, we should keep it that way until we know for
|
||||
// sure (by, presumably, all the names getting answers in the future).
|
||||
// 3. One or more of the names was responded to by all servers with some
|
||||
// sort of error indication. In that case, we can't know if, in fact,
|
||||
// there are records for the name or not, so whatever state the
|
||||
// configuration is in, we should keep it that way until we know for
|
||||
// sure (by, presumably, all the names getting answers in the future).
|
||||
//
|
||||
// Outcomes 1 and 2 are indicated by a valid response message (possibly an
|
||||
// empty one) and no error. Outcome 3 is indicated by an error return. The
|
||||
|
@ -301,11 +313,11 @@ func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Ms
|
|||
//
|
||||
// A "viable answer" is one which indicates either:
|
||||
//
|
||||
// 1. "yes, I know that name, and here are its records of the requested type"
|
||||
// (RCODE==SUCCESS, ANCOUNT > 0);
|
||||
// 2. "yes, I know that name, but it has no records of the requested type"
|
||||
// (RCODE==SUCCESS, ANCOUNT==0); or
|
||||
// 3. "I know that name doesn't exist" (RCODE==NXDOMAIN).
|
||||
// 1. "yes, I know that name, and here are its records of the requested type"
|
||||
// (RCODE==SUCCESS, ANCOUNT > 0);
|
||||
// 2. "yes, I know that name, but it has no records of the requested type"
|
||||
// (RCODE==SUCCESS, ANCOUNT==0); or
|
||||
// 3. "I know that name doesn't exist" (RCODE==NXDOMAIN).
|
||||
//
|
||||
// A non-viable answer is "anything else", which encompasses both various
|
||||
// system-level problems (like network timeouts) and also
|
||||
|
|
|
@ -80,6 +80,7 @@ func TestDNS(t *testing.T) {
|
|||
"__meta_dns_name": "web.example.com.",
|
||||
"__meta_dns_srv_record_target": "",
|
||||
"__meta_dns_srv_record_port": "",
|
||||
"__meta_dns_mx_record_target": "",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -110,6 +111,7 @@ func TestDNS(t *testing.T) {
|
|||
"__meta_dns_name": "web.example.com.",
|
||||
"__meta_dns_srv_record_target": "",
|
||||
"__meta_dns_srv_record_port": "",
|
||||
"__meta_dns_mx_record_target": "",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -140,12 +142,14 @@ func TestDNS(t *testing.T) {
|
|||
"__meta_dns_name": "_mysql._tcp.db.example.com.",
|
||||
"__meta_dns_srv_record_target": "db1.example.com.",
|
||||
"__meta_dns_srv_record_port": "3306",
|
||||
"__meta_dns_mx_record_target": "",
|
||||
},
|
||||
{
|
||||
"__address__": "db2.example.com:3306",
|
||||
"__meta_dns_name": "_mysql._tcp.db.example.com.",
|
||||
"__meta_dns_srv_record_target": "db2.example.com.",
|
||||
"__meta_dns_srv_record_port": "3306",
|
||||
"__meta_dns_mx_record_target": "",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -175,6 +179,7 @@ func TestDNS(t *testing.T) {
|
|||
"__meta_dns_name": "_mysql._tcp.db.example.com.",
|
||||
"__meta_dns_srv_record_target": "db1.example.com.",
|
||||
"__meta_dns_srv_record_port": "3306",
|
||||
"__meta_dns_mx_record_target": "",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -195,6 +200,45 @@ func TestDNS(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MX record query",
|
||||
config: SDConfig{
|
||||
Names: []string{"example.com."},
|
||||
Type: "MX",
|
||||
Port: 25,
|
||||
RefreshInterval: model.Duration(time.Minute),
|
||||
},
|
||||
lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
|
||||
return &dns.Msg{
|
||||
Answer: []dns.RR{
|
||||
&dns.MX{Preference: 0, Mx: "smtp1.example.com."},
|
||||
&dns.MX{Preference: 10, Mx: "smtp2.example.com."},
|
||||
},
|
||||
},
|
||||
nil
|
||||
},
|
||||
expected: []*targetgroup.Group{
|
||||
{
|
||||
Source: "example.com.",
|
||||
Targets: []model.LabelSet{
|
||||
{
|
||||
"__address__": "smtp1.example.com:25",
|
||||
"__meta_dns_name": "example.com.",
|
||||
"__meta_dns_srv_record_target": "",
|
||||
"__meta_dns_srv_record_port": "",
|
||||
"__meta_dns_mx_record_target": "smtp1.example.com.",
|
||||
},
|
||||
{
|
||||
"__address__": "smtp2.example.com:25",
|
||||
"__meta_dns_name": "example.com.",
|
||||
"__meta_dns_srv_record_target": "",
|
||||
"__meta_dns_srv_record_port": "",
|
||||
"__meta_dns_mx_record_target": "smtp2.example.com.",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
|
|
|
@ -32,7 +32,7 @@ import (
|
|||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
|
|
|
@ -308,6 +308,7 @@ func TestInitialUpdate(t *testing.T) {
|
|||
"fixtures/valid.yml",
|
||||
"fixtures/valid.json",
|
||||
} {
|
||||
tc := tc
|
||||
t.Run(tc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
|
|
@ -25,7 +25,7 @@ import (
|
|||
"github.com/go-kit/log"
|
||||
"github.com/prometheus/common/model"
|
||||
"golang.org/x/oauth2/google"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/option"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery"
|
||||
|
|
|
@ -322,6 +322,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
|
|||
ports := strconv.FormatUint(uint64(port.Port), 10)
|
||||
|
||||
target[podContainerNameLabel] = lv(c.Name)
|
||||
target[podContainerImageLabel] = lv(c.Image)
|
||||
target[podContainerPortNameLabel] = lv(cport.Name)
|
||||
target[podContainerPortNumberLabel] = lv(ports)
|
||||
target[podContainerPortProtocolLabel] = lv(string(port.Protocol))
|
||||
|
@ -380,6 +381,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
|
|||
target := model.LabelSet{
|
||||
model.AddressLabel: lv(a),
|
||||
podContainerNameLabel: lv(c.Name),
|
||||
podContainerImageLabel: lv(c.Image),
|
||||
podContainerPortNameLabel: lv(cport.Name),
|
||||
podContainerPortNumberLabel: lv(ports),
|
||||
podContainerPortProtocolLabel: lv(string(cport.Protocol)),
|
||||
|
|
|
@ -128,7 +128,8 @@ func TestEndpointsDiscoveryAdd(t *testing.T) {
|
|||
NodeName: "testnode",
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Name: "c1",
|
||||
Image: "c1:latest",
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
Name: "mainport",
|
||||
|
@ -138,7 +139,8 @@ func TestEndpointsDiscoveryAdd(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
Name: "c2",
|
||||
Name: "c2",
|
||||
Image: "c2:latest",
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
Name: "sideport",
|
||||
|
@ -206,6 +208,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) {
|
|||
"__meta_kubernetes_pod_node_name": "testnode",
|
||||
"__meta_kubernetes_pod_host_ip": "2.3.4.5",
|
||||
"__meta_kubernetes_pod_container_name": "c1",
|
||||
"__meta_kubernetes_pod_container_image": "c1:latest",
|
||||
"__meta_kubernetes_pod_container_port_name": "mainport",
|
||||
"__meta_kubernetes_pod_container_port_number": "9000",
|
||||
"__meta_kubernetes_pod_container_port_protocol": "TCP",
|
||||
|
@ -220,6 +223,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) {
|
|||
"__meta_kubernetes_pod_node_name": "testnode",
|
||||
"__meta_kubernetes_pod_host_ip": "2.3.4.5",
|
||||
"__meta_kubernetes_pod_container_name": "c2",
|
||||
"__meta_kubernetes_pod_container_image": "c2:latest",
|
||||
"__meta_kubernetes_pod_container_port_name": "sideport",
|
||||
"__meta_kubernetes_pod_container_port_number": "9001",
|
||||
"__meta_kubernetes_pod_container_port_protocol": "TCP",
|
||||
|
@ -649,7 +653,8 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) {
|
|||
NodeName: "testnode",
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Name: "c1",
|
||||
Image: "c1:latest",
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
Name: "mainport",
|
||||
|
@ -720,6 +725,7 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) {
|
|||
"__meta_kubernetes_pod_node_name": "testnode",
|
||||
"__meta_kubernetes_pod_host_ip": "2.3.4.5",
|
||||
"__meta_kubernetes_pod_container_name": "c1",
|
||||
"__meta_kubernetes_pod_container_image": "c1:latest",
|
||||
"__meta_kubernetes_pod_container_port_name": "mainport",
|
||||
"__meta_kubernetes_pod_container_port_number": "9000",
|
||||
"__meta_kubernetes_pod_container_port_protocol": "TCP",
|
||||
|
@ -753,7 +759,8 @@ func TestEndpointsDiscoveryOwnNamespace(t *testing.T) {
|
|||
NodeName: "testnode",
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "p1",
|
||||
Name: "p1",
|
||||
Image: "p1:latest",
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
Name: "mainport",
|
||||
|
|
|
@ -350,6 +350,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
|
|||
ports := strconv.FormatUint(uint64(*port.port()), 10)
|
||||
|
||||
target[podContainerNameLabel] = lv(c.Name)
|
||||
target[podContainerImageLabel] = lv(c.Image)
|
||||
target[podContainerPortNameLabel] = lv(cport.Name)
|
||||
target[podContainerPortNumberLabel] = lv(ports)
|
||||
target[podContainerPortProtocolLabel] = lv(string(cport.Protocol))
|
||||
|
@ -398,6 +399,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
|
|||
target := model.LabelSet{
|
||||
model.AddressLabel: lv(a),
|
||||
podContainerNameLabel: lv(c.Name),
|
||||
podContainerImageLabel: lv(c.Image),
|
||||
podContainerPortNameLabel: lv(cport.Name),
|
||||
podContainerPortNumberLabel: lv(ports),
|
||||
podContainerPortProtocolLabel: lv(string(cport.Protocol)),
|
||||
|
|
|
@ -234,7 +234,8 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
|
|||
NodeName: "testnode",
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Name: "c1",
|
||||
Image: "c1:latest",
|
||||
Ports: []corev1.ContainerPort{
|
||||
{
|
||||
Name: "mainport",
|
||||
|
@ -244,7 +245,8 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
Name: "c2",
|
||||
Name: "c2",
|
||||
Image: "c2:latest",
|
||||
Ports: []corev1.ContainerPort{
|
||||
{
|
||||
Name: "sideport",
|
||||
|
@ -307,6 +309,7 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
|
|||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||
"__meta_kubernetes_pod_container_name": "c1",
|
||||
"__meta_kubernetes_pod_container_image": "c1:latest",
|
||||
"__meta_kubernetes_pod_container_port_name": "mainport",
|
||||
"__meta_kubernetes_pod_container_port_number": "9000",
|
||||
"__meta_kubernetes_pod_container_port_protocol": "TCP",
|
||||
|
@ -321,6 +324,7 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
|
|||
{
|
||||
"__address__": "1.2.3.4:9001",
|
||||
"__meta_kubernetes_pod_container_name": "c2",
|
||||
"__meta_kubernetes_pod_container_image": "c2:latest",
|
||||
"__meta_kubernetes_pod_container_port_name": "sideport",
|
||||
"__meta_kubernetes_pod_container_port_number": "9001",
|
||||
"__meta_kubernetes_pod_container_port_protocol": "TCP",
|
||||
|
@ -878,7 +882,8 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
|
|||
NodeName: "testnode",
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Name: "c1",
|
||||
Image: "c1:latest",
|
||||
Ports: []corev1.ContainerPort{
|
||||
{
|
||||
Name: "mainport",
|
||||
|
@ -953,6 +958,7 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
|
|||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||
"__meta_kubernetes_pod_container_name": "c1",
|
||||
"__meta_kubernetes_pod_container_image": "c1:latest",
|
||||
"__meta_kubernetes_pod_container_port_name": "mainport",
|
||||
"__meta_kubernetes_pod_container_port_number": "9000",
|
||||
"__meta_kubernetes_pod_container_port_protocol": "TCP",
|
||||
|
@ -993,7 +999,8 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) {
|
|||
NodeName: "testnode",
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "p1",
|
||||
Name: "p1",
|
||||
Image: "p1:latest",
|
||||
Ports: []corev1.ContainerPort{
|
||||
{
|
||||
Name: "mainport",
|
||||
|
|
|
@ -336,6 +336,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
|
|||
}
|
||||
|
||||
kcfg.UserAgent = userAgent
|
||||
kcfg.ContentType = "application/vnd.kubernetes.protobuf"
|
||||
|
||||
c, err := kubernetes.NewForConfig(kcfg)
|
||||
if err != nil {
|
||||
|
|
|
@ -177,6 +177,7 @@ const (
|
|||
podNameLabel = metaLabelPrefix + "pod_name"
|
||||
podIPLabel = metaLabelPrefix + "pod_ip"
|
||||
podContainerNameLabel = metaLabelPrefix + "pod_container_name"
|
||||
podContainerImageLabel = metaLabelPrefix + "pod_container_image"
|
||||
podContainerPortNameLabel = metaLabelPrefix + "pod_container_port_name"
|
||||
podContainerPortNumberLabel = metaLabelPrefix + "pod_container_port_number"
|
||||
podContainerPortProtocolLabel = metaLabelPrefix + "pod_container_port_protocol"
|
||||
|
@ -266,9 +267,10 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group {
|
|||
// We don't have a port so we just set the address label to the pod IP.
|
||||
// The user has to add a port manually.
|
||||
tg.Targets = append(tg.Targets, model.LabelSet{
|
||||
model.AddressLabel: lv(pod.Status.PodIP),
|
||||
podContainerNameLabel: lv(c.Name),
|
||||
podContainerIsInit: lv(strconv.FormatBool(isInit)),
|
||||
model.AddressLabel: lv(pod.Status.PodIP),
|
||||
podContainerNameLabel: lv(c.Name),
|
||||
podContainerImageLabel: lv(c.Image),
|
||||
podContainerIsInit: lv(strconv.FormatBool(isInit)),
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
@ -280,6 +282,7 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group {
|
|||
tg.Targets = append(tg.Targets, model.LabelSet{
|
||||
model.AddressLabel: lv(addr),
|
||||
podContainerNameLabel: lv(c.Name),
|
||||
podContainerImageLabel: lv(c.Image),
|
||||
podContainerPortNumberLabel: lv(ports),
|
||||
podContainerPortNameLabel: lv(port.Name),
|
||||
podContainerPortProtocolLabel: lv(string(port.Protocol)),
|
||||
|
|
|
@ -50,7 +50,8 @@ func makeMultiPortPods() *v1.Pod {
|
|||
NodeName: "testnode",
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "testcontainer0",
|
||||
Name: "testcontainer0",
|
||||
Image: "testcontainer0:latest",
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
Name: "testport0",
|
||||
|
@ -65,7 +66,8 @@ func makeMultiPortPods() *v1.Pod {
|
|||
},
|
||||
},
|
||||
{
|
||||
Name: "testcontainer1",
|
||||
Name: "testcontainer1",
|
||||
Image: "testcontainer1:latest",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -94,7 +96,8 @@ func makePods() *v1.Pod {
|
|||
NodeName: "testnode",
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "testcontainer",
|
||||
Name: "testcontainer",
|
||||
Image: "testcontainer:latest",
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
Name: "testport",
|
||||
|
@ -130,7 +133,8 @@ func makeInitContainerPods() *v1.Pod {
|
|||
NodeName: "testnode",
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "testcontainer",
|
||||
Name: "testcontainer",
|
||||
Image: "testcontainer:latest",
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
Name: "testport",
|
||||
|
@ -143,7 +147,8 @@ func makeInitContainerPods() *v1.Pod {
|
|||
|
||||
InitContainers: []v1.Container{
|
||||
{
|
||||
Name: "initcontainer",
|
||||
Name: "initcontainer",
|
||||
Image: "initcontainer:latest",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -169,6 +174,7 @@ func expectedPodTargetGroups(ns string) map[string]*targetgroup.Group {
|
|||
{
|
||||
"__address__": "1.2.3.4:9000",
|
||||
"__meta_kubernetes_pod_container_name": "testcontainer",
|
||||
"__meta_kubernetes_pod_container_image": "testcontainer:latest",
|
||||
"__meta_kubernetes_pod_container_port_name": "testport",
|
||||
"__meta_kubernetes_pod_container_port_number": "9000",
|
||||
"__meta_kubernetes_pod_container_port_protocol": "TCP",
|
||||
|
@ -219,6 +225,7 @@ func TestPodDiscoveryBeforeRun(t *testing.T) {
|
|||
{
|
||||
"__address__": "1.2.3.4:9000",
|
||||
"__meta_kubernetes_pod_container_name": "testcontainer0",
|
||||
"__meta_kubernetes_pod_container_image": "testcontainer0:latest",
|
||||
"__meta_kubernetes_pod_container_port_name": "testport0",
|
||||
"__meta_kubernetes_pod_container_port_number": "9000",
|
||||
"__meta_kubernetes_pod_container_port_protocol": "TCP",
|
||||
|
@ -227,15 +234,17 @@ func TestPodDiscoveryBeforeRun(t *testing.T) {
|
|||
{
|
||||
"__address__": "1.2.3.4:9001",
|
||||
"__meta_kubernetes_pod_container_name": "testcontainer0",
|
||||
"__meta_kubernetes_pod_container_image": "testcontainer0:latest",
|
||||
"__meta_kubernetes_pod_container_port_name": "testport1",
|
||||
"__meta_kubernetes_pod_container_port_number": "9001",
|
||||
"__meta_kubernetes_pod_container_port_protocol": "UDP",
|
||||
"__meta_kubernetes_pod_container_init": "false",
|
||||
},
|
||||
{
|
||||
"__address__": "1.2.3.4",
|
||||
"__meta_kubernetes_pod_container_name": "testcontainer1",
|
||||
"__meta_kubernetes_pod_container_init": "false",
|
||||
"__address__": "1.2.3.4",
|
||||
"__meta_kubernetes_pod_container_name": "testcontainer1",
|
||||
"__meta_kubernetes_pod_container_image": "testcontainer1:latest",
|
||||
"__meta_kubernetes_pod_container_init": "false",
|
||||
},
|
||||
},
|
||||
Labels: model.LabelSet{
|
||||
|
@ -267,9 +276,10 @@ func TestPodDiscoveryInitContainer(t *testing.T) {
|
|||
key := fmt.Sprintf("pod/%s/testpod", ns)
|
||||
expected := expectedPodTargetGroups(ns)
|
||||
expected[key].Targets = append(expected[key].Targets, model.LabelSet{
|
||||
"__address__": "1.2.3.4",
|
||||
"__meta_kubernetes_pod_container_name": "initcontainer",
|
||||
"__meta_kubernetes_pod_container_init": "true",
|
||||
"__address__": "1.2.3.4",
|
||||
"__meta_kubernetes_pod_container_name": "initcontainer",
|
||||
"__meta_kubernetes_pod_container_image": "initcontainer:latest",
|
||||
"__meta_kubernetes_pod_container_init": "true",
|
||||
})
|
||||
expected[key].Labels["__meta_kubernetes_pod_phase"] = "Pending"
|
||||
expected[key].Labels["__meta_kubernetes_pod_ready"] = "false"
|
||||
|
@ -329,7 +339,8 @@ func TestPodDiscoveryUpdate(t *testing.T) {
|
|||
NodeName: "testnode",
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "testcontainer",
|
||||
Name: "testcontainer",
|
||||
Image: "testcontainer:latest",
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
Name: "testport",
|
||||
|
|
|
@ -150,8 +150,10 @@ const (
|
|||
serviceAnnotationPrefix = metaLabelPrefix + "service_annotation_"
|
||||
serviceAnnotationPresentPrefix = metaLabelPrefix + "service_annotationpresent_"
|
||||
servicePortNameLabel = metaLabelPrefix + "service_port_name"
|
||||
servicePortNumberLabel = metaLabelPrefix + "service_port_number"
|
||||
servicePortProtocolLabel = metaLabelPrefix + "service_port_protocol"
|
||||
serviceClusterIPLabel = metaLabelPrefix + "service_cluster_ip"
|
||||
serviceLoadBalancerIP = metaLabelPrefix + "service_loadbalancer_ip"
|
||||
serviceExternalNameLabel = metaLabelPrefix + "service_external_name"
|
||||
serviceType = metaLabelPrefix + "service_type"
|
||||
)
|
||||
|
@ -189,6 +191,7 @@ func (s *Service) buildService(svc *apiv1.Service) *targetgroup.Group {
|
|||
labelSet := model.LabelSet{
|
||||
model.AddressLabel: lv(addr),
|
||||
servicePortNameLabel: lv(port.Name),
|
||||
servicePortNumberLabel: lv(strconv.FormatInt(int64(port.Port), 10)),
|
||||
servicePortProtocolLabel: lv(string(port.Protocol)),
|
||||
serviceType: lv(string(svc.Spec.Type)),
|
||||
}
|
||||
|
@ -199,6 +202,10 @@ func (s *Service) buildService(svc *apiv1.Service) *targetgroup.Group {
|
|||
labelSet[serviceClusterIPLabel] = lv(svc.Spec.ClusterIP)
|
||||
}
|
||||
|
||||
if svc.Spec.Type == apiv1.ServiceTypeLoadBalancer {
|
||||
labelSet[serviceLoadBalancerIP] = lv(svc.Spec.LoadBalancerIP)
|
||||
}
|
||||
|
||||
tg.Targets = append(tg.Targets, labelSet)
|
||||
}
|
||||
|
||||
|
|
|
@ -96,6 +96,27 @@ func makeExternalService() *v1.Service {
|
|||
}
|
||||
}
|
||||
|
||||
func makeLoadBalancerService() *v1.Service {
|
||||
return &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "testservice-loadbalancer",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Ports: []v1.ServicePort{
|
||||
{
|
||||
Name: "testport",
|
||||
Protocol: v1.ProtocolTCP,
|
||||
Port: int32(31900),
|
||||
},
|
||||
},
|
||||
Type: v1.ServiceTypeLoadBalancer,
|
||||
LoadBalancerIP: "127.0.0.1",
|
||||
ClusterIP: "10.0.0.1",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestServiceDiscoveryAdd(t *testing.T) {
|
||||
n, c := makeDiscovery(RoleService, NamespaceDiscovery{})
|
||||
|
||||
|
@ -106,17 +127,20 @@ func TestServiceDiscoveryAdd(t *testing.T) {
|
|||
c.CoreV1().Services(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
|
||||
obj = makeExternalService()
|
||||
c.CoreV1().Services(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
|
||||
obj = makeLoadBalancerService()
|
||||
c.CoreV1().Services(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
|
||||
},
|
||||
expectedMaxItems: 2,
|
||||
expectedMaxItems: 3,
|
||||
expectedRes: map[string]*targetgroup.Group{
|
||||
"svc/default/testservice": {
|
||||
Targets: []model.LabelSet{
|
||||
{
|
||||
"__meta_kubernetes_service_port_protocol": "TCP",
|
||||
"__address__": "testservice.default.svc:30900",
|
||||
"__meta_kubernetes_service_type": "ClusterIP",
|
||||
"__meta_kubernetes_service_cluster_ip": "10.0.0.1",
|
||||
"__meta_kubernetes_service_port_name": "testport",
|
||||
"__address__": "testservice.default.svc:30900",
|
||||
"__meta_kubernetes_service_type": "ClusterIP",
|
||||
"__meta_kubernetes_service_cluster_ip": "10.0.0.1",
|
||||
"__meta_kubernetes_service_port_name": "testport",
|
||||
"__meta_kubernetes_service_port_number": "30900",
|
||||
},
|
||||
},
|
||||
Labels: model.LabelSet{
|
||||
|
@ -132,6 +156,7 @@ func TestServiceDiscoveryAdd(t *testing.T) {
|
|||
"__address__": "testservice-external.default.svc:31900",
|
||||
"__meta_kubernetes_service_type": "ExternalName",
|
||||
"__meta_kubernetes_service_port_name": "testport",
|
||||
"__meta_kubernetes_service_port_number": "31900",
|
||||
"__meta_kubernetes_service_external_name": "FooExternalName",
|
||||
},
|
||||
},
|
||||
|
@ -141,6 +166,24 @@ func TestServiceDiscoveryAdd(t *testing.T) {
|
|||
},
|
||||
Source: "svc/default/testservice-external",
|
||||
},
|
||||
"svc/default/testservice-loadbalancer": {
|
||||
Targets: []model.LabelSet{
|
||||
{
|
||||
"__meta_kubernetes_service_port_protocol": "TCP",
|
||||
"__address__": "testservice-loadbalancer.default.svc:31900",
|
||||
"__meta_kubernetes_service_type": "LoadBalancer",
|
||||
"__meta_kubernetes_service_port_name": "testport",
|
||||
"__meta_kubernetes_service_port_number": "31900",
|
||||
"__meta_kubernetes_service_cluster_ip": "10.0.0.1",
|
||||
"__meta_kubernetes_service_loadbalancer_ip": "127.0.0.1",
|
||||
},
|
||||
},
|
||||
Labels: model.LabelSet{
|
||||
"__meta_kubernetes_service_name": "testservice-loadbalancer",
|
||||
"__meta_kubernetes_namespace": "default",
|
||||
},
|
||||
Source: "svc/default/testservice-loadbalancer",
|
||||
},
|
||||
},
|
||||
}.Run(t)
|
||||
}
|
||||
|
@ -178,17 +221,19 @@ func TestServiceDiscoveryUpdate(t *testing.T) {
|
|||
Targets: []model.LabelSet{
|
||||
{
|
||||
"__meta_kubernetes_service_port_protocol": "TCP",
|
||||
"__address__": "testservice.default.svc:30900",
|
||||
"__meta_kubernetes_service_type": "ClusterIP",
|
||||
"__meta_kubernetes_service_cluster_ip": "10.0.0.1",
|
||||
"__meta_kubernetes_service_port_name": "testport0",
|
||||
"__address__": "testservice.default.svc:30900",
|
||||
"__meta_kubernetes_service_type": "ClusterIP",
|
||||
"__meta_kubernetes_service_cluster_ip": "10.0.0.1",
|
||||
"__meta_kubernetes_service_port_name": "testport0",
|
||||
"__meta_kubernetes_service_port_number": "30900",
|
||||
},
|
||||
{
|
||||
"__meta_kubernetes_service_port_protocol": "UDP",
|
||||
"__address__": "testservice.default.svc:30901",
|
||||
"__meta_kubernetes_service_type": "ClusterIP",
|
||||
"__meta_kubernetes_service_cluster_ip": "10.0.0.1",
|
||||
"__meta_kubernetes_service_port_name": "testport1",
|
||||
"__address__": "testservice.default.svc:30901",
|
||||
"__meta_kubernetes_service_type": "ClusterIP",
|
||||
"__meta_kubernetes_service_cluster_ip": "10.0.0.1",
|
||||
"__meta_kubernetes_service_port_name": "testport1",
|
||||
"__meta_kubernetes_service_port_number": "30901",
|
||||
},
|
||||
},
|
||||
Labels: model.LabelSet{
|
||||
|
@ -223,10 +268,11 @@ func TestServiceDiscoveryNamespaces(t *testing.T) {
|
|||
Targets: []model.LabelSet{
|
||||
{
|
||||
"__meta_kubernetes_service_port_protocol": "TCP",
|
||||
"__address__": "testservice.ns1.svc:30900",
|
||||
"__meta_kubernetes_service_type": "ClusterIP",
|
||||
"__meta_kubernetes_service_cluster_ip": "10.0.0.1",
|
||||
"__meta_kubernetes_service_port_name": "testport",
|
||||
"__address__": "testservice.ns1.svc:30900",
|
||||
"__meta_kubernetes_service_type": "ClusterIP",
|
||||
"__meta_kubernetes_service_cluster_ip": "10.0.0.1",
|
||||
"__meta_kubernetes_service_port_name": "testport",
|
||||
"__meta_kubernetes_service_port_number": "30900",
|
||||
},
|
||||
},
|
||||
Labels: model.LabelSet{
|
||||
|
@ -239,10 +285,11 @@ func TestServiceDiscoveryNamespaces(t *testing.T) {
|
|||
Targets: []model.LabelSet{
|
||||
{
|
||||
"__meta_kubernetes_service_port_protocol": "TCP",
|
||||
"__address__": "testservice.ns2.svc:30900",
|
||||
"__meta_kubernetes_service_type": "ClusterIP",
|
||||
"__meta_kubernetes_service_cluster_ip": "10.0.0.1",
|
||||
"__meta_kubernetes_service_port_name": "testport",
|
||||
"__address__": "testservice.ns2.svc:30900",
|
||||
"__meta_kubernetes_service_type": "ClusterIP",
|
||||
"__meta_kubernetes_service_cluster_ip": "10.0.0.1",
|
||||
"__meta_kubernetes_service_port_name": "testport",
|
||||
"__meta_kubernetes_service_port_number": "30900",
|
||||
},
|
||||
},
|
||||
Labels: model.LabelSet{
|
||||
|
@ -273,10 +320,11 @@ func TestServiceDiscoveryOwnNamespace(t *testing.T) {
|
|||
Targets: []model.LabelSet{
|
||||
{
|
||||
"__meta_kubernetes_service_port_protocol": "TCP",
|
||||
"__address__": "testservice.own-ns.svc:30900",
|
||||
"__meta_kubernetes_service_type": "ClusterIP",
|
||||
"__meta_kubernetes_service_cluster_ip": "10.0.0.1",
|
||||
"__meta_kubernetes_service_port_name": "testport",
|
||||
"__address__": "testservice.own-ns.svc:30900",
|
||||
"__meta_kubernetes_service_type": "ClusterIP",
|
||||
"__meta_kubernetes_service_cluster_ip": "10.0.0.1",
|
||||
"__meta_kubernetes_service_port_name": "testport",
|
||||
"__meta_kubernetes_service_port_number": "30900",
|
||||
},
|
||||
},
|
||||
Labels: model.LabelSet{
|
||||
|
@ -307,10 +355,11 @@ func TestServiceDiscoveryAllNamespaces(t *testing.T) {
|
|||
Targets: []model.LabelSet{
|
||||
{
|
||||
"__meta_kubernetes_service_port_protocol": "TCP",
|
||||
"__address__": "testservice.own-ns.svc:30900",
|
||||
"__meta_kubernetes_service_type": "ClusterIP",
|
||||
"__meta_kubernetes_service_cluster_ip": "10.0.0.1",
|
||||
"__meta_kubernetes_service_port_name": "testport",
|
||||
"__address__": "testservice.own-ns.svc:30900",
|
||||
"__meta_kubernetes_service_type": "ClusterIP",
|
||||
"__meta_kubernetes_service_cluster_ip": "10.0.0.1",
|
||||
"__meta_kubernetes_service_port_name": "testport",
|
||||
"__meta_kubernetes_service_port_number": "30900",
|
||||
},
|
||||
},
|
||||
Labels: model.LabelSet{
|
||||
|
@ -323,10 +372,11 @@ func TestServiceDiscoveryAllNamespaces(t *testing.T) {
|
|||
Targets: []model.LabelSet{
|
||||
{
|
||||
"__meta_kubernetes_service_port_protocol": "TCP",
|
||||
"__address__": "testservice.non-own-ns.svc:30900",
|
||||
"__meta_kubernetes_service_type": "ClusterIP",
|
||||
"__meta_kubernetes_service_cluster_ip": "10.0.0.1",
|
||||
"__meta_kubernetes_service_port_name": "testport",
|
||||
"__address__": "testservice.non-own-ns.svc:30900",
|
||||
"__meta_kubernetes_service_type": "ClusterIP",
|
||||
"__meta_kubernetes_service_cluster_ip": "10.0.0.1",
|
||||
"__meta_kubernetes_service_port_name": "testport",
|
||||
"__meta_kubernetes_service_port_number": "30900",
|
||||
},
|
||||
},
|
||||
Labels: model.LabelSet{
|
||||
|
|
|
@ -159,10 +159,10 @@ func TestMarathonSDRemoveApp(t *testing.T) {
|
|||
tg2 := tgs[0]
|
||||
|
||||
if tg2.Source != tg1.Source {
|
||||
t.Fatalf("Source is different: %s != %s", tg1.Source, tg2.Source)
|
||||
if len(tg2.Targets) > 0 {
|
||||
t.Fatalf("Got a non-empty target set: %s", tg2.Targets)
|
||||
t.Errorf("Got a non-empty target set: %s", tg2.Targets)
|
||||
}
|
||||
t.Fatalf("Source is different: %s != %s", tg1.Source, tg2.Source)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -23,7 +23,7 @@ import (
|
|||
"github.com/go-kit/log"
|
||||
"github.com/gophercloud/gophercloud"
|
||||
"github.com/gophercloud/gophercloud/openstack"
|
||||
conntrack "github.com/mwitkow/go-conntrack"
|
||||
"github.com/mwitkow/go-conntrack"
|
||||
"github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
|
|
|
@ -40,6 +40,7 @@ import (
|
|||
|
||||
const (
|
||||
pdbLabel = model.MetaLabelPrefix + "puppetdb_"
|
||||
pdbLabelQuery = pdbLabel + "query"
|
||||
pdbLabelCertname = pdbLabel + "certname"
|
||||
pdbLabelResource = pdbLabel + "resource"
|
||||
pdbLabelType = pdbLabel + "type"
|
||||
|
@ -215,6 +216,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
|
|||
|
||||
for _, resource := range resources {
|
||||
labels := model.LabelSet{
|
||||
pdbLabelQuery: model.LabelValue(d.query),
|
||||
pdbLabelCertname: model.LabelValue(resource.Certname),
|
||||
pdbLabelResource: model.LabelValue(resource.Resource),
|
||||
pdbLabelType: model.LabelValue(resource.Type),
|
||||
|
|
|
@ -91,6 +91,7 @@ func TestPuppetDBRefresh(t *testing.T) {
|
|||
Targets: []model.LabelSet{
|
||||
{
|
||||
model.AddressLabel: model.LabelValue("edinburgh.example.com:80"),
|
||||
model.LabelName("__meta_puppetdb_query"): model.LabelValue("vhosts"),
|
||||
model.LabelName("__meta_puppetdb_certname"): model.LabelValue("edinburgh.example.com"),
|
||||
model.LabelName("__meta_puppetdb_environment"): model.LabelValue("prod"),
|
||||
model.LabelName("__meta_puppetdb_exported"): model.LabelValue("false"),
|
||||
|
@ -131,6 +132,7 @@ func TestPuppetDBRefreshWithParameters(t *testing.T) {
|
|||
Targets: []model.LabelSet{
|
||||
{
|
||||
model.AddressLabel: model.LabelValue("edinburgh.example.com:80"),
|
||||
model.LabelName("__meta_puppetdb_query"): model.LabelValue("vhosts"),
|
||||
model.LabelName("__meta_puppetdb_certname"): model.LabelValue("edinburgh.example.com"),
|
||||
model.LabelName("__meta_puppetdb_environment"): model.LabelValue("prod"),
|
||||
model.LabelName("__meta_puppetdb_exported"): model.LabelValue("false"),
|
||||
|
|
|
@ -29,12 +29,12 @@ func TestTargetGroupStrictJSONUnmarshal(t *testing.T) {
|
|||
expectedGroup Group
|
||||
}{
|
||||
{
|
||||
json: ` {"labels": {},"targets": []}`,
|
||||
json: `{"labels": {},"targets": []}`,
|
||||
expectedReply: nil,
|
||||
expectedGroup: Group{Targets: []model.LabelSet{}, Labels: model.LabelSet{}},
|
||||
},
|
||||
{
|
||||
json: ` {"labels": {"my":"label"},"targets": ["localhost:9090","localhost:9091"]}`,
|
||||
json: `{"labels": {"my":"label"},"targets": ["localhost:9090","localhost:9091"]}`,
|
||||
expectedReply: nil,
|
||||
expectedGroup: Group{Targets: []model.LabelSet{
|
||||
{"__address__": "localhost:9090"},
|
||||
|
@ -42,11 +42,11 @@ func TestTargetGroupStrictJSONUnmarshal(t *testing.T) {
|
|||
}, Labels: model.LabelSet{"my": "label"}},
|
||||
},
|
||||
{
|
||||
json: ` {"label": {},"targets": []}`,
|
||||
json: `{"label": {},"targets": []}`,
|
||||
expectedReply: errors.New("json: unknown field \"label\""),
|
||||
},
|
||||
{
|
||||
json: ` {"labels": {},"target": []}`,
|
||||
json: `{"labels": {},"target": []}`,
|
||||
expectedReply: errors.New("json: unknown field \"target\""),
|
||||
},
|
||||
}
|
||||
|
|
|
@ -25,7 +25,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
conntrack "github.com/mwitkow/go-conntrack"
|
||||
"github.com/mwitkow/go-conntrack"
|
||||
"github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
|
|
|
@@ -99,6 +99,7 @@ remote_read:

# Storage related settings that are runtime reloadable.
storage:
  [ tsdb: <tsdb> ]
  [ exemplars: <exemplars> ]

# Configures exporting traces.
@@ -457,7 +458,7 @@ subscription_id: <string>
# Optional client secret. Only required with authentication_method OAuth.
[ client_secret: <secret> ]

# Optional resource group name. Limits discovery to this resource group.
[ resource_group: <string> ]

# Refresh interval to re-read the instance list.
@@ -623,7 +624,7 @@ metadata and a single tag).
DigitalOcean SD configurations allow retrieving scrape targets from [DigitalOcean's](https://www.digitalocean.com/)
Droplets API.
This service discovery uses the public IPv4 address by default, but that can be
changed with relabelling, as demonstrated in [the Prometheus digitalocean-sd
changed with relabeling, as demonstrated in [the Prometheus digitalocean-sd
configuration file](/documentation/examples/prometheus-digitalocean.yml).

The following meta labels are available on targets during [relabeling](#relabel_config):
@@ -961,8 +962,8 @@ A DNS-based service discovery configuration allows specifying a set of DNS
domain names which are periodically queried to discover a list of targets. The
DNS servers to be contacted are read from `/etc/resolv.conf`.

This service discovery method only supports basic DNS A, AAAA and SRV record
queries, but not the advanced DNS-SD approach specified in
This service discovery method only supports basic DNS A, AAAA, MX and SRV
record queries, but not the advanced DNS-SD approach specified in
[RFC6763](https://tools.ietf.org/html/rfc6763).

The following meta labels are available on targets during [relabeling](#relabel_config):

@@ -970,13 +971,14 @@ The following meta labels are available on targets during [relabel_
* `__meta_dns_name`: the record name that produced the discovered target.
* `__meta_dns_srv_record_target`: the target field of the SRV record
* `__meta_dns_srv_record_port`: the port field of the SRV record
* `__meta_dns_mx_record_target`: the target field of the MX record
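For illustration, a minimal scrape configuration that discovers targets from MX records and copies the new MX meta label into a target label might look like the sketch below; the job name, queried domain, and resulting label name are hypothetical placeholders rather than part of this change:

```yaml
scrape_configs:
  - job_name: "mx-example"            # hypothetical job name
    dns_sd_configs:
      - names: ["example.com."]       # domain whose MX records are queried
        type: "MX"
        port: 9100                    # port attached to each discovered MX target
    relabel_configs:
      # Optionally expose the MX target host as a regular label.
      - source_labels: [__meta_dns_mx_record_target]
        target_label: mx_target
```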
```yaml
# A list of DNS domain names to be queried.
names:
  [ - <string> ]

# The type of DNS query to perform. One of SRV, A, or AAAA.
# The type of DNS query to perform. One of SRV, A, AAAA or MX.
[ type: <string> | default = 'SRV' ]

# The port number used if the query type is not SRV.
@ -992,6 +994,11 @@ EC2 SD configurations allow retrieving scrape targets from AWS EC2
|
|||
instances. The private IP address is used by default, but may be changed to
|
||||
the public IP address with relabeling.
|
||||
|
||||
The IAM credentials used must have the `ec2:DescribeInstances` permission to
|
||||
discover scrape targets, and may optionally have the
|
||||
`ec2:DescribeAvailabilityZones` permission if you want the availability zone ID
|
||||
available as a label (see below).
|
||||
|
||||
The following meta labels are available on targets during [relabeling](#relabel_config):
|
||||
|
||||
* `__meta_ec2_ami`: the EC2 Amazon Machine Image
|
||||
|
@ -1010,6 +1017,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
|
|||
* `__meta_ec2_private_ip`: the private IP address of the instance, if present
|
||||
* `__meta_ec2_public_dns_name`: the public DNS name of the instance, if available
|
||||
* `__meta_ec2_public_ip`: the public IP address of the instance, if available
|
||||
* `__meta_ec2_region`: the region of the instance
|
||||
* `__meta_ec2_subnet_id`: comma separated list of subnets IDs in which the instance is running, if available
|
||||
* `__meta_ec2_tag_<tagkey>`: each tag value of the instance
|
||||
* `__meta_ec2_vpc_id`: the ID of the VPC in which the instance is running, if available
|
||||
|
@ -1178,6 +1186,7 @@ The resource address is the `certname` of the resource and can be changed during
|
|||
|
||||
The following meta labels are available on targets during [relabeling](#relabel_config):
|
||||
|
||||
* `__meta_puppetdb_query`: the Puppet Query Language (PQL) query
|
||||
* `__meta_puppetdb_certname`: the name of the node associated with the resource
|
||||
* `__meta_puppetdb_resource`: a SHA-1 hash of the resource’s type, title, and parameters, for identification
|
||||
* `__meta_puppetdb_type`: the resource type
|
||||
|
@ -1269,6 +1278,7 @@ changes resulting in well-formed target groups are applied.
|
|||
Files must contain a list of static configs, using these formats:
|
||||
|
||||
**JSON**
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
|
@ -1282,6 +1292,7 @@ Files must contain a list of static configs, using these formats:
|
|||
```
|
||||
|
||||
**YAML**
|
||||
|
||||
```yaml
|
||||
- targets:
|
||||
[ - '<host>' ]
|
||||
|
@@ -1494,8 +1505,8 @@ Example response body:
```

The endpoint is queried periodically at the specified refresh interval.
The `prometheus_sd_http_failures_total` counter metric tracks the number of
refresh failures.

Each target has a meta label `__meta_url` during the
[relabeling phase](#relabel_config). Its value is set to the
@ -1557,7 +1568,7 @@ following meta labels are available on all targets during
|
|||
[relabeling](#relabel_config):
|
||||
|
||||
* `__meta_ionos_server_availability_zone`: the availability zone of the server
|
||||
* `__meta_ionos_server_boot_cdrom_id`: the ID of the CD-ROM the server is booted
|
||||
* `__meta_ionos_server_boot_cdrom_id`: the ID of the CD-ROM the server is booted
|
||||
from
|
||||
* `__meta_ionos_server_boot_image_id`: the ID of the boot image or snapshot the
|
||||
server is booted from
|
||||
|
@ -1670,11 +1681,13 @@ Available meta labels:
|
|||
* `__meta_kubernetes_service_annotation_<annotationname>`: Each annotation from the service object.
|
||||
* `__meta_kubernetes_service_annotationpresent_<annotationname>`: "true" for each annotation of the service object.
|
||||
* `__meta_kubernetes_service_cluster_ip`: The cluster IP address of the service. (Does not apply to services of type ExternalName)
|
||||
* `__meta_kubernetes_service_loadbalancer_ip`: The IP address of the loadbalancer. (Applies to services of type LoadBalancer)
|
||||
* `__meta_kubernetes_service_external_name`: The DNS name of the service. (Applies to services of type ExternalName)
|
||||
* `__meta_kubernetes_service_label_<labelname>`: Each label from the service object.
|
||||
* `__meta_kubernetes_service_labelpresent_<labelname>`: `true` for each label of the service object.
|
||||
* `__meta_kubernetes_service_name`: The name of the service object.
|
||||
* `__meta_kubernetes_service_port_name`: Name of the service port for the target.
|
||||
* `__meta_kubernetes_service_port_number`: Number of the service port for the target.
|
||||
* `__meta_kubernetes_service_port_protocol`: Protocol of the service port for the target.
|
||||
* `__meta_kubernetes_service_type`: The type of the service.
|
||||
|
||||
|
@ -1695,6 +1708,7 @@ Available meta labels:
|
|||
* `__meta_kubernetes_pod_annotationpresent_<annotationname>`: `true` for each annotation from the pod object.
|
||||
* `__meta_kubernetes_pod_container_init`: `true` if the container is an [InitContainer](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)
|
||||
* `__meta_kubernetes_pod_container_name`: Name of the container the target address points to.
|
||||
* `__meta_kubernetes_pod_container_image`: The image the container is using.
|
||||
* `__meta_kubernetes_pod_container_port_name`: Name of the container port.
|
||||
* `__meta_kubernetes_pod_container_port_number`: Number of the container port.
|
||||
* `__meta_kubernetes_pod_container_port_protocol`: Protocol of the container port.
|
||||
|
@ -1857,8 +1871,8 @@ namespaces:
|
|||
|
||||
# Optional metadata to attach to discovered targets. If omitted, no additional metadata is attached.
|
||||
attach_metadata:
|
||||
# Attaches node metadata to discovered targets. Valid for roles: pod, endpoints, endpointslice.
|
||||
# When set to true, Prometheus must have permissions to get Nodes.
|
||||
# Attaches node metadata to discovered targets. Valid for roles: pod, endpoints, endpointslice.
|
||||
# When set to true, Prometheus must have permissions to get Nodes.
|
||||
[ node: <boolean> | default = false ]
|
||||
```
|
||||
|
||||
|
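A minimal, hypothetical sketch of a Kubernetes SD section that opts into node metadata (the job name is a placeholder; `attach_metadata` is the setting documented above):

```yaml
scrape_configs:
  - job_name: "kubernetes-pods"       # hypothetical job name
    kubernetes_sd_configs:
      - role: pod
        attach_metadata:
          node: true                  # requires permission to get Nodes
```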
@ -1956,6 +1970,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
|
|||
* `__meta_lightsail_ipv6_addresses`: comma separated list of IPv6 addresses assigned to the instance's network interfaces, if present
|
||||
* `__meta_lightsail_private_ip`: the private IP address of the instance
|
||||
* `__meta_lightsail_public_ip`: the public IP address of the instance, if available
|
||||
* `__meta_lightsail_region`: the region of the instance
|
||||
* `__meta_lightsail_tag_<tagkey>`: each tag value of the instance
|
||||
|
||||
See below for the configuration options for Lightsail discovery:
|
||||
|
@ -1992,7 +2007,7 @@ See below for the configuration options for Lightsail discovery:
|
|||
Linode SD configurations allow retrieving scrape targets from [Linode's](https://www.linode.com/)
|
||||
Linode APIv4.
|
||||
This service discovery uses the public IPv4 address by default, by that can be
|
||||
changed with relabelling, as demonstrated in [the Prometheus linode-sd
|
||||
changed with relabeling, as demonstrated in [the Prometheus linode-sd
|
||||
configuration file](/documentation/examples/prometheus-linode.yml).
|
||||
|
||||
The following meta labels are available on targets during [relabeling](#relabel_config):
|
||||
|
@ -2461,7 +2476,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
|
|||
* `__meta_scaleway_instance_zone`: the zone of the server (ex: `fr-par-1`, complete list [here](https://developers.scaleway.com/en/products/instance/api/#introduction))
|
||||
|
||||
This role uses the private IPv4 address by default. This can be
|
||||
changed with relabelling, as demonstrated in [the Prometheus scaleway-sd
|
||||
changed with relabeling, as demonstrated in [the Prometheus scaleway-sd
|
||||
configuration file](/documentation/examples/prometheus-scaleway.yml).
|
||||
|
||||
#### Baremetal role
|
||||
|
@ -2479,7 +2494,7 @@ configuration file](/documentation/examples/prometheus-scaleway.yml).
|
|||
* `__meta_scaleway_baremetal_zone`: the zone of the server (ex: `fr-par-1`, complete list [here](https://developers.scaleway.com/en/products/instance/api/#introduction))
|
||||
|
||||
This role uses the public IPv4 address by default. This can be
|
||||
changed with relabelling, as demonstrated in [the Prometheus scaleway-sd
|
||||
changed with relabeling, as demonstrated in [the Prometheus scaleway-sd
|
||||
configuration file](/documentation/examples/prometheus-scaleway.yml).
|
||||
|
||||
See below for the configuration options for Scaleway discovery:
|
||||
|
@ -2616,7 +2631,7 @@ for a practical example on how to set up Uyuni Prometheus configuration.
|
|||
Vultr SD configurations allow retrieving scrape targets from [Vultr](https://www.vultr.com/).
|
||||
|
||||
This service discovery uses the main IPv4 address by default, which can be
|
||||
changed with relabelling, as demonstrated in [the Prometheus vultr-sd
|
||||
changed with relabeling, as demonstrated in [the Prometheus vultr-sd
|
||||
configuration file](/documentation/examples/prometheus-vultr.yml).
|
||||
|
||||
The following meta labels are available on targets during [relabeling](#relabel_config):
|
||||
|
@ -2980,38 +2995,6 @@ relabel_configs:
|
|||
[ - <relabel_config> ... ]
|
||||
```
|
||||
|
||||
### `<tracing_config>`
|
||||
|
||||
`tracing_config` configures exporting traces from Prometheus to a tracing backend via the OTLP protocol. Tracing is currently an **experimental** feature and could change in the future.
|
||||
|
||||
```yaml
|
||||
# Client used to export the traces. Options are 'http' or 'grpc'.
|
||||
[ client_type: <string> | default = grpc ]
|
||||
|
||||
# Endpoint to send the traces to. Should be provided in format <host>:<port>.
|
||||
[ endpoint: <string> ]
|
||||
|
||||
# Sets the probability a given trace will be sampled. Must be a float from 0 through 1.
|
||||
[ sampling_fraction: <float> | default = 0 ]
|
||||
|
||||
# If disabled, the client will use a secure connection.
|
||||
[ insecure: <boolean> | default = false ]
|
||||
|
||||
# Key-value pairs to be used as headers associated with gRPC or HTTP requests.
|
||||
headers:
|
||||
[ <string>: <string> ... ]
|
||||
|
||||
# Compression key for supported compression types. Supported compression: gzip.
|
||||
[ compression: <string> ]
|
||||
|
||||
# Maximum time the exporter will wait for each batch export.
|
||||
[ timeout: <duration> | default = 10s ]
|
||||
|
||||
# TLS configuration.
|
||||
tls_config:
|
||||
[ <tls_config> ]
|
||||
```
|
||||
|
||||
### `<remote_write>`
|
||||
|
||||
`write_relabel_configs` is relabeling applied to samples before sending them
|
||||
|
@@ -3213,6 +3196,25 @@ There is a list of
[integrations](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage)
with this feature.

### `<tsdb>`

`tsdb` lets you configure the runtime-reloadable configuration settings of the TSDB.

NOTE: Out-of-order ingestion is an experimental feature, but you do not need any additional flag to enable it. Setting `out_of_order_time_window` to a positive duration enables it.

```yaml
# Configures how old an out-of-order/out-of-bounds sample can be w.r.t. the TSDB max time.
# An out-of-order/out-of-bounds sample is ingested into the TSDB as long as the timestamp
# of the sample is >= TSDB.MaxTime-out_of_order_time_window.
#
# When out_of_order_time_window is >0, the errors out-of-order and out-of-bounds are
# combined into a single error called 'too-old'; a sample is either (a) ingestible
# into the TSDB, i.e. it is an in-order sample or an out-of-order/out-of-bounds sample
# that is within the out-of-order window, or (b) too-old, i.e. not in-order
# and before the out-of-order window.
[ out_of_order_time_window: <duration> | default = 0s ]
```
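As a purely illustrative sketch (the 30m window is an arbitrary value, not a recommendation from this change), enabling out-of-order ingestion in the main configuration file would look roughly like this:

```yaml
# Runtime-reloadable storage settings; a positive window enables out-of-order ingestion.
storage:
  tsdb:
    out_of_order_time_window: 30m
```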
### `<exemplars>`

Note that exemplar storage is still considered experimental and must be enabled via `--enable-feature=exemplar-storage`.

@@ -3221,3 +3223,35 @@ Note that exemplar storage is still considered experimental and must be enabled
# Configures the maximum size of the circular buffer used to store exemplars for all series. Resizable during runtime.
[ max_exemplars: <int> | default = 100000 ]
```
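A hedged example of how this could be set under the runtime-reloadable storage section (the value simply repeats the documented default):

```yaml
storage:
  exemplars:
    max_exemplars: 100000   # circular buffer size for exemplars across all series
```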
### `<tracing_config>`

`tracing_config` configures exporting traces from Prometheus to a tracing backend via the OTLP protocol. Tracing is currently an **experimental** feature and could change in the future.

```yaml
# Client used to export the traces. Options are 'http' or 'grpc'.
[ client_type: <string> | default = grpc ]

# Endpoint to send the traces to. Should be provided in format <host>:<port>.
[ endpoint: <string> ]

# Sets the probability a given trace will be sampled. Must be a float from 0 through 1.
[ sampling_fraction: <float> | default = 0 ]

# If disabled, the client will use a secure connection.
[ insecure: <boolean> | default = false ]

# Key-value pairs to be used as headers associated with gRPC or HTTP requests.
headers:
  [ <string>: <string> ... ]

# Compression key for supported compression types. Supported compression: gzip.
[ compression: <string> ]

# Maximum time the exporter will wait for each batch export.
[ timeout: <duration> | default = 10s ]

# TLS configuration.
tls_config:
  [ <tls_config> ]
```
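For orientation only, a hypothetical tracing setup (collector address and sampling rate are invented placeholders, and the enclosing `tracing` key is assumed from the main configuration section rather than shown in this hunk) could look like:

```yaml
tracing:
  client_type: grpc
  endpoint: "otel-collector.example:4317"   # hypothetical OTLP collector address
  sampling_fraction: 0.05                   # sample 5% of traces
```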
|
@ -90,7 +90,7 @@ http_server_config:
|
|||
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options
|
||||
[ X-Content-Type-Options: <string> ]
|
||||
# Set the X-XSS-Protection header to all responses.
|
||||
# Unset if blank. Accepted value is nosniff.
|
||||
# Unset if blank.
|
||||
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection
|
||||
[ X-XSS-Protection: <string> ]
|
||||
# Set the Strict-Transport-Security header to HTTP responses.
|
||||
|
|
|
@ -51,13 +51,14 @@ If functions are used in a pipeline, the pipeline value is passed as the last ar
|
|||
|
||||
### Numbers
|
||||
|
||||
| Name | Arguments | Returns | Notes |
|
||||
| ------------------ | -----------------| --------| --------- |
|
||||
| humanize | number or string | string | Converts a number to a more readable format, using [metric prefixes](https://en.wikipedia.org/wiki/Metric_prefix).
|
||||
| humanize1024 | number or string | string | Like `humanize`, but uses 1024 as the base rather than 1000. |
|
||||
| humanizeDuration | number or string | string | Converts a duration in seconds to a more readable format. |
|
||||
| humanizePercentage | number or string | string | Converts a ratio value to a fraction of 100. |
|
||||
| humanizeTimestamp | number or string | string | Converts a Unix timestamp in seconds to a more readable format. |
|
||||
| Name | Arguments | Returns | Notes |
|
||||
|---------------------| -----------------| --------| --------- |
|
||||
| humanize | number or string | string | Converts a number to a more readable format, using [metric prefixes](https://en.wikipedia.org/wiki/Metric_prefix).
|
||||
| humanize1024 | number or string | string | Like `humanize`, but uses 1024 as the base rather than 1000. |
|
||||
| humanizeDuration | number or string | string | Converts a duration in seconds to a more readable format. |
|
||||
| humanizePercentage | number or string | string | Converts a ratio value to a fraction of 100. |
|
||||
| humanizeTimestamp | number or string | string | Converts a Unix timestamp in seconds to a more readable format. |
|
||||
| toTime | number or string | *time.Time | Converts a Unix timestamp in seconds to a time.Time. |
|
||||
|
||||
Humanizing functions are intended to produce reasonable output for consumption
|
||||
by humans, and are not guaranteed to return the same results between Prometheus
|
||||
|
|
|
@@ -94,3 +94,12 @@ computed at all.
`--enable-feature=auto-gomaxprocs`

When enabled, the GOMAXPROCS variable is automatically set to match the Linux container CPU quota.

## No default scrape port

`--enable-feature=no-default-scrape-port`

When enabled, the default ports for HTTP (`:80`) or HTTPS (`:443`) will _not_ be added to
the address used to scrape a target (the value of the `__address__` label), contrary to the default behavior.
In addition, if a default HTTP or HTTPS port has already been added either in a static configuration or
by a service discovery mechanism and the respective scheme is specified (`http` or `https`), that port will be removed.
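To make the behavior concrete, here is a hedged sketch (hostnames invented): with the flag enabled, a target listed without a port keeps its bare address, and an explicit default port is stripped when the scheme matches.

```yaml
scrape_configs:
  - job_name: "example"
    scheme: http
    static_configs:
      - targets:
          - "node1.example.com"      # scraped with __address__ "node1.example.com" (no :80 appended)
          - "node2.example.com:80"   # the default HTTP port is removed from __address__
```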
|
|
@ -12,6 +12,7 @@ Prometheus provides a set of management APIs to facilitate automation and integr
|
|||
|
||||
```
|
||||
GET /-/healthy
|
||||
HEAD /-/healthy
|
||||
```
|
||||
|
||||
This endpoint always returns 200 and should be used to check Prometheus health.
|
||||
|
@ -21,6 +22,7 @@ This endpoint always returns 200 and should be used to check Prometheus health.
|
|||
|
||||
```
|
||||
GET /-/ready
|
||||
HEAD /-/ready
|
||||
```
|
||||
|
||||
This endpoint returns 200 when Prometheus is ready to serve traffic (i.e. respond to queries).
|
||||
|
|
|
@ -206,6 +206,35 @@ $ curl 'http://localhost:9090/api/v1/query_range?query=up&start=2015-07-01T20:10
|
|||
}
|
||||
```
|
||||
|
||||
## Formatting query expressions
|
||||
|
||||
The following endpoint formats a PromQL expression in a prettified way:
|
||||
|
||||
```
|
||||
GET /api/v1/format_query
|
||||
POST /api/v1/format_query
|
||||
```
|
||||
|
||||
URL query parameters:
|
||||
|
||||
- `query=<string>`: Prometheus expression query string.
|
||||
|
||||
You can URL-encode these parameters directly in the request body by using the `POST` method and
|
||||
`Content-Type: application/x-www-form-urlencoded` header. This is useful when specifying a large
|
||||
query that may breach server-side URL character limits.
|
||||
|
||||
The `data` section of the query result is a string containing the formatted query expression. Note that any comments are removed in the formatted string.
|
||||
|
||||
The following example formats the expression `foo/bar`:
|
||||
|
||||
```json
|
||||
$ curl 'http://localhost:9090/api/v1/format_query?query=foo/bar'
|
||||
{
|
||||
"status" : "success",
|
||||
"data" : "foo / bar"
|
||||
}
|
||||
```
|
||||
|
||||
## Querying metadata
|
||||
|
||||
Prometheus offers a set of API endpoints to query metadata about series and their labels.
|
||||
|
@ -476,8 +505,8 @@ GET /api/v1/targets
|
|||
```
|
||||
|
||||
Both the active and dropped targets are part of the response by default.
|
||||
`labels` represents the label set after relabelling has occurred.
|
||||
`discoveredLabels` represent the unmodified labels retrieved during service discovery before relabelling has occurred.
|
||||
`labels` represents the label set after relabeling has occurred.
|
||||
`discoveredLabels` represent the unmodified labels retrieved during service discovery before relabeling has occurred.
|
||||
|
||||
```json
|
||||
$ curl http://localhost:9090/api/v1/targets
|
||||
|
@ -750,7 +779,7 @@ curl -G http://localhost:9091/api/v1/targets/metadata \
|
|||
|
||||
## Querying metric metadata
|
||||
|
||||
It returns metadata about metrics currently scrapped from targets. However, it does not provide any target information.
|
||||
It returns metadata about metrics currently scraped from targets. However, it does not provide any target information.
|
||||
This is considered **experimental** and might change in the future.
|
||||
|
||||
```
|
||||
|
|
|
@ -136,6 +136,8 @@ delta(cpu_temp_celsius{host="zeus"}[2h])
|
|||
|
||||
`deriv(v range-vector)` calculates the per-second derivative of the time series in a range
|
||||
vector `v`, using [simple linear regression](https://en.wikipedia.org/wiki/Simple_linear_regression).
|
||||
The range vector must have at least two samples in order to perform the calculation. When `+Inf` or
|
||||
`-Inf` are found in the range vector, the slope and offset value calculated will be `NaN`.
|
||||
|
||||
`deriv` should only be used with gauges.
|
||||
|
||||
|
@ -335,6 +337,9 @@ January etc.
|
|||
`predict_linear(v range-vector, t scalar)` predicts the value of time series
|
||||
`t` seconds from now, based on the range vector `v`, using [simple linear
|
||||
regression](https://en.wikipedia.org/wiki/Simple_linear_regression).
|
||||
The range vector must have at least two samples in order to perform the
|
||||
calculation. When `+Inf` or `-Inf` are found in the range vector,
|
||||
the slope and offset value calculated will be `NaN`.
|
||||
|
||||
`predict_linear` should only be used with gauges.
|
||||
|
||||
|
|
docs/querying/remote_read_api.md — new file (87 lines)
|
@ -0,0 +1,87 @@
|
|||
---
|
||||
title: Remote Read API
|
||||
sort_rank: 7
|
||||
---
|
||||
|
||||
# Remote Read API
|
||||
|
||||
This is not currently considered part of the stable API and is subject to change
|
||||
even between non-major version releases of Prometheus.
|
||||
|
||||
## Format overview
|
||||
|
||||
The API response format is JSON. Every successful API request returns a `2xx`
|
||||
status code.
|
||||
|
||||
Invalid requests that reach the API handlers return a JSON error object
|
||||
and one of the following HTTP response codes:
|
||||
|
||||
- `400 Bad Request` when parameters are missing or incorrect.
|
||||
- `422 Unprocessable Entity` when an expression can't be executed
|
||||
([RFC4918](https://tools.ietf.org/html/rfc4918#page-78)).
|
||||
- `503 Service Unavailable` when queries time out or abort.
|
||||
|
||||
Other non-`2xx` codes may be returned for errors occurring before the API
|
||||
endpoint is reached.
|
||||
|
||||
An array of warnings may be returned if there are errors that do
|
||||
not inhibit the request execution. All of the data that was successfully
|
||||
collected will be returned in the data field.
|
||||
|
||||
The JSON response envelope format is as follows:
|
||||
|
||||
```
|
||||
{
|
||||
"status": "success" | "error",
|
||||
"data": <data>,
|
||||
|
||||
// Only set if status is "error". The data field may still hold
|
||||
// additional data.
|
||||
"errorType": "<string>",
|
||||
"error": "<string>",
|
||||
|
||||
// Only if there were warnings while executing the request.
|
||||
// There will still be data in the data field.
|
||||
"warnings": ["<string>"]
|
||||
}
|
||||
```
|
||||
|
||||
Generic placeholders are defined as follows:
|
||||
|
||||
* `<rfc3339 | unix_timestamp>`: Input timestamps may be provided either in
|
||||
[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format or as a Unix timestamp
|
||||
in seconds, with optional decimal places for sub-second precision. Output
|
||||
timestamps are always represented as Unix timestamps in seconds.
|
||||
* `<series_selector>`: Prometheus [time series
|
||||
selectors](basics.md#time-series-selectors) like `http_requests_total` or
|
||||
`http_requests_total{method=~"(GET|POST)"}` and need to be URL-encoded.
|
||||
* `<duration>`: [Prometheus duration strings](basics.md#time_durations).
|
||||
For example, `5m` refers to a duration of 5 minutes.
|
||||
* `<bool>`: boolean values (strings `true` and `false`).
|
||||
|
||||
Note: Names of query parameters that may be repeated end with `[]`.
|
||||
|
||||
## Remote Read API
|
||||
|
||||
This API provides data read functionality from Prometheus. This interface expects [snappy](https://github.com/google/snappy) compression.
|
||||
The API definition is located [here](https://github.com/prometheus/prometheus/blob/master/prompb/remote.proto).
|
||||
/// Can you clarify what you mean by this?
|
||||
/// https://github.com/prometheus/prometheus/pull/7266#discussion_r426456791 Can we talk a little bit how negotiation works of sampled vs streamed ?
|
||||
|
||||
Request are made to the following endpoint.
|
||||
```
|
||||
/api/v1/read
|
||||
```
|
||||
|
||||
### Samples
|
||||
/// Does it return a message that includes a list, or does it return a list of raw samples?
|
||||
|
||||
This returns a message that includes a list of raw samples.
|
||||
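
As a rough illustration (not part of the API definition above), the sketch below builds a sampled read request in Go using the `prompb` message types and snappy compression referenced in this document. The server address, time range, and matcher are placeholder assumptions, and error handling is elided.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"

	"github.com/gogo/protobuf/proto"
	"github.com/golang/snappy"
	"github.com/prometheus/prometheus/prompb"
)

func main() {
	// Ask for raw samples for up{} over an assumed one-hour window.
	req := &prompb.ReadRequest{
		Queries: []*prompb.Query{{
			StartTimestampMs: 1660000000000,
			EndTimestampMs:   1660003600000,
			Matchers: []*prompb.LabelMatcher{{
				Type:  prompb.LabelMatcher_EQ,
				Name:  "__name__",
				Value: "up",
			}},
		}},
	}

	// The request body is a snappy-compressed protobuf message.
	data, _ := proto.Marshal(req)
	compressed := snappy.Encode(nil, data)

	httpReq, _ := http.NewRequest(http.MethodPost, "http://localhost:9090/api/v1/read", bytes.NewReader(compressed))
	httpReq.Header.Set("Content-Type", "application/x-protobuf")
	httpReq.Header.Set("Content-Encoding", "snappy")
	httpReq.Header.Set("X-Prometheus-Remote-Read-Version", "0.1.0")

	resp, _ := http.DefaultClient.Do(httpReq)
	defer resp.Body.Close()

	// The sampled response body is also snappy-compressed and contains a
	// prompb.ReadResponse message with the raw samples.
	body, _ := io.ReadAll(resp.Body)
	raw, _ := snappy.Decode(nil, body)
	var readResp prompb.ReadResponse
	_ = proto.Unmarshal(raw, &readResp)
	fmt.Println("query results:", len(readResp.Results))
}
```
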

### Streamed Chunks
/// This is a little much detail, the relevant point is they're the internal implementation of the chunks.

These streamed chunks are encoded with an XOR algorithm inspired by [Gorilla](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf)
compression. However, they provide millisecond resolution instead of one-second resolution.
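
Regarding the negotiation question left in the notes above: as a sketch (reusing the `prompb` request built earlier, which is an assumption of this example), a client opts in to the streamed encoding by listing it in the request's accepted response types; a server that does not support streaming falls back to sampled responses.

```go
// Sketch: advertise that this client can consume streamed, chunked responses.
req.AcceptedResponseTypes = []prompb.ReadRequest_ResponseType{
	prompb.ReadRequest_STREAMED_XOR_CHUNKS,
	prompb.ReadRequest_SAMPLES, // fallback if streaming is unsupported
}
```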
@ -155,7 +155,7 @@ Backfilling can be used via the Promtool command line. Promtool will write the b

promtool tsdb create-blocks-from openmetrics <input file> [<output directory>]
```

After the creation of the blocks, move it to the data directory of Prometheus. If there is an overlap with the existing blocks in Prometheus, the flag `--storage.tsdb.allow-overlapping-blocks` needs to be set. Note that any backfilled data is subject to the retention configured for your Prometheus server (by time or size).
After the creation of the blocks, move them to the data directory of Prometheus. If there is an overlap with the existing blocks in Prometheus, the flag `--storage.tsdb.allow-overlapping-blocks` needs to be set for Prometheus versions v2.38 and below. Note that any backfilled data is subject to the retention configured for your Prometheus server (by time or size).
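
A minimal sketch of the full workflow (the input file, output directory, and data directory paths below are placeholders):

```
# Create blocks from an OpenMetrics export.
promtool tsdb create-blocks-from openmetrics metrics.om ./blocks

# Move the generated blocks into the Prometheus data directory; they are
# merged with the existing blocks at the next compaction.
mv ./blocks/* /path/to/prometheus/data/
```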

#### Longer Block Durations

@ -189,7 +189,7 @@ $ promtool tsdb create-blocks-from rules \

The recording rule files provided should be a normal [Prometheus rules file](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/).

The output of `promtool tsdb create-blocks-from rules` command is a directory that contains blocks with the historical rule data for all rules in the recording rule files. By default the output directory is `data/`. In order to make use of this new block data, the blocks must be moved to a running Prometheus instance data dir `storage.tsdb.path` that has the flag `--storage.tsdb.allow-overlapping-blocks` enabled. Once moved, the new blocks will merge with existing blocks when the next compaction runs.
The output of the `promtool tsdb create-blocks-from rules` command is a directory that contains blocks with the historical rule data for all rules in the recording rule files. By default, the output directory is `data/`. In order to make use of this new block data, the blocks must be moved to a running Prometheus instance data dir `storage.tsdb.path` (for Prometheus versions v2.38 and below, the flag `--storage.tsdb.allow-overlapping-blocks` must be enabled). Once moved, the new blocks will merge with existing blocks when the next compaction runs.
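
As a sketch of that final step (the target path is a placeholder):

```
# The generated blocks land in data/ by default; move them into the running
# server's --storage.tsdb.path so the next compaction merges them.
mv data/* /path/to/prometheus/data/
```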

### Limitations

@ -1,53 +1,69 @@
|
|||
module github.com/prometheus/prometheus/documentation/examples/remote_storage
|
||||
|
||||
go 1.17
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/go-kit/log v0.2.1
|
||||
github.com/gogo/protobuf v1.3.2
|
||||
github.com/golang/snappy v0.0.4
|
||||
github.com/influxdata/influxdb v1.9.5
|
||||
github.com/prometheus/client_golang v1.12.2
|
||||
github.com/prometheus/common v0.34.0
|
||||
github.com/prometheus/prometheus v1.8.2-0.20220202104425-d819219dd438
|
||||
github.com/stretchr/testify v1.7.2
|
||||
github.com/influxdata/influxdb v1.10.0
|
||||
github.com/prometheus/client_golang v1.13.0
|
||||
github.com/prometheus/common v0.37.0
|
||||
github.com/stretchr/testify v1.8.0
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
|
||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
|
||||
github.com/aws/aws-sdk-go v1.42.31 // indirect
|
||||
github.com/aws/aws-sdk-go v1.44.72 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dennwc/varint v1.0.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.2 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.3 // indirect
|
||||
github.com/go-logfmt/logfmt v0.5.1 // indirect
|
||||
github.com/go-logr/logr v1.2.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/jpillora/backoff v1.0.0 // indirect
|
||||
github.com/kr/pretty v0.2.1 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common/sigv4 v0.1.0 // indirect
|
||||
github.com/prometheus/procfs v0.7.3 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.26.1 // indirect
|
||||
go.opentelemetry.io/otel v1.2.0 // indirect
|
||||
go.opentelemetry.io/otel/internal/metric v0.24.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v0.24.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.2.0 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.34.0 // indirect
|
||||
go.opentelemetry.io/otel v1.9.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v0.31.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.9.0 // indirect
|
||||
go.uber.org/atomic v1.9.0 // indirect
|
||||
go.uber.org/goleak v1.1.12 // indirect
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
|
||||
golang.org/x/net v0.0.0-20220809184613-07c6da5e1ced // indirect
|
||||
golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
|
||||
golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/protobuf v1.27.1 // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/prometheus/prometheus v0.38.0
|
||||
golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7 // indirect
|
||||
)
|
||||
|
||||
exclude (
|
||||
// These excludes are needed because of some weird version collision.
|
||||
// Feel free to try removing them after future dependency updates.
|
||||
cloud.google.com/go v0.26.0
|
||||
cloud.google.com/go v0.34.0
|
||||
cloud.google.com/go v0.65.0
|
||||
cloud.google.com/go v0.82.0
|
||||
)
|
||||
|
|
File diff suppressed because it is too large
|
@ -47,21 +47,21 @@ const (
|
|||
// byte sequence found in this TagValue in way very similar to the traditional
|
||||
// percent-encoding (https://en.wikipedia.org/wiki/Percent-encoding):
|
||||
//
|
||||
// - The string that underlies TagValue is scanned byte by byte.
|
||||
// - The string that underlies TagValue is scanned byte by byte.
|
||||
//
|
||||
// - If a byte represents a legal Graphite rune with the exception of '%', '/',
|
||||
// '=' and '.', that byte is directly copied to the resulting byte slice.
|
||||
// % is used for percent-encoding of other bytes.
|
||||
// / is not usable in filenames.
|
||||
// = is used when generating the path to associate values to labels.
|
||||
// . already means something for Graphite and thus can't be used in a value.
|
||||
// - If a byte represents a legal Graphite rune with the exception of '%', '/',
|
||||
// '=' and '.', that byte is directly copied to the resulting byte slice.
|
||||
// % is used for percent-encoding of other bytes.
|
||||
// / is not usable in filenames.
|
||||
// = is used when generating the path to associate values to labels.
|
||||
// . already means something for Graphite and thus can't be used in a value.
|
||||
//
|
||||
// - If the byte is any of (){},=.'"\, then a '\' will be prepended to it. We
|
||||
// do not percent-encode them since they are explicitly usable in this
|
||||
// way in Graphite.
|
||||
// - If the byte is any of (){},=.'"\, then a '\' will be prepended to it. We
|
||||
// do not percent-encode them since they are explicitly usable in this
|
||||
// way in Graphite.
|
||||
//
|
||||
// - All other bytes are replaced by '%' followed by two bytes containing the
|
||||
// uppercase ASCII representation of their hexadecimal value.
|
||||
// - All other bytes are replaced by '%' followed by two bytes containing the
|
||||
// uppercase ASCII representation of their hexadecimal value.
|
||||
//
|
||||
// This encoding allows to save arbitrary Go strings in Graphite. That's
|
||||
// required because Prometheus label values can contain anything. Using
|
||||
|
|
|
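
A minimal Go sketch of the encoding described in this comment follows. The exact set of "legal Graphite runes" used below is an assumption for illustration (the surrounding file defines the authoritative set), but the three cases mirror the bullets above.

```go
package main

import (
	"fmt"
	"strings"
)

// Bytes that are legal in Graphite but must be escaped with a backslash.
const backslashEscaped = `(){},=.'"\`

// isLegalGraphiteRune is an assumed approximation of the legal set:
// ASCII letters, digits, and a few common symbols.
func isLegalGraphiteRune(b byte) bool {
	switch {
	case b >= 'a' && b <= 'z', b >= 'A' && b <= 'Z', b >= '0' && b <= '9':
		return true
	case b == '-' || b == '_' || b == ':' || b == '%' || b == '/':
		return true
	}
	return strings.IndexByte(backslashEscaped, b) >= 0
}

// escapeTagValue implements the three rules described in the comment above.
func escapeTagValue(v string) string {
	var sb strings.Builder
	for i := 0; i < len(v); i++ {
		b := v[i]
		switch {
		case strings.IndexByte(backslashEscaped, b) >= 0:
			// Explicitly usable in Graphite, but a '\' is prepended.
			sb.WriteByte('\\')
			sb.WriteByte(b)
		case isLegalGraphiteRune(b) && b != '%' && b != '/':
			// Legal rune other than '%', '/', '=' and '.': copied directly.
			// ('=' and '.' were already handled by the backslash case.)
			sb.WriteByte(b)
		default:
			// Everything else: '%' followed by two uppercase hex digits.
			fmt.Fprintf(&sb, "%%%02X", b)
		}
	}
	return sb.String()
}

func main() {
	fmt.Println(escapeTagValue(`a.b/c d`)) // prints: a\.b%2Fc%20d
}
```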
@ -317,6 +317,20 @@
|
|||
description: '{{ printf "%%.0f" $value }} targets in Prometheus %(prometheusName)s have failed to sync because invalid configuration was supplied.' % $._config,
|
||||
},
|
||||
},
|
||||
{
|
||||
alert: 'PrometheusHighQueryLoad',
|
||||
expr: |||
|
||||
avg_over_time(prometheus_engine_queries{%(prometheusSelector)s}[5m]) / max_over_time(prometheus_engine_queries_concurrent_max{%(prometheusSelector)s}[5m]) > 0.8
|
||||
||| % $._config,
|
||||
'for': '15m',
|
||||
labels: {
|
||||
severity: 'warning',
|
||||
},
|
||||
annotations: {
|
||||
summary: 'Prometheus is reaching its maximum capacity serving concurrent requests.',
|
||||
description: 'Prometheus %(prometheusName)s query API has less than 20%% available capacity in its query engine for the last 15 minutes.' % $._config,
|
||||
},
|
||||
},
|
||||
] + if $._config.prometheusHAGroupLabels == '' then self.rulesWithoutHA else self.rulesWithHA,
|
||||
rulesWithoutHA:: [
|
||||
{
|
||||
|
|
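
For reference, assuming `%(prometheusSelector)s` expands to `job="prometheus"` (an illustrative assumption), the expression of the new `PrometheusHighQueryLoad` alert renders to the following PromQL, which fires after the query engine has spent 15 minutes above 80% of its configured concurrency limit on average:

```
avg_over_time(prometheus_engine_queries{job="prometheus"}[5m])
  /
max_over_time(prometheus_engine_queries_concurrent_max{job="prometheus"}[5m])
  > 0.8
```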
|
@ -21,7 +21,7 @@
|
|||
// If you run Prometheus on Kubernetes with the Prometheus
|
||||
// Operator, you can make use of the configured target labels for
|
||||
// nicer naming:
|
||||
// prometheusNameTemplate: '{{$labels.namespace}}/{{$labels.pod}}'
|
||||
// prometheusName: '{{$labels.namespace}}/{{$labels.pod}}'
|
||||
|
||||
// prometheusHAGroupName is inserted into annotations to name an
|
||||
// HA group. All labels used here must also be present in
|
||||
|
|
127 go.mod
|
@ -1,48 +1,48 @@
|
|||
module github.com/prometheus/prometheus
|
||||
|
||||
go 1.17
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible
|
||||
github.com/Azure/go-autorest/autorest v0.11.27
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.20
|
||||
github.com/Azure/go-autorest/autorest v0.11.28
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.21
|
||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
|
||||
github.com/aws/aws-sdk-go v1.44.47
|
||||
github.com/aws/aws-sdk-go v1.44.109
|
||||
github.com/cespare/xxhash/v2 v2.1.2
|
||||
github.com/dennwc/varint v1.0.0
|
||||
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245
|
||||
github.com/digitalocean/godo v1.81.0
|
||||
github.com/docker/docker v20.10.17+incompatible
|
||||
github.com/digitalocean/godo v1.84.1
|
||||
github.com/docker/docker v20.10.18+incompatible
|
||||
github.com/edsrzf/mmap-go v1.1.0
|
||||
github.com/envoyproxy/go-control-plane v0.10.3
|
||||
github.com/envoyproxy/protoc-gen-validate v0.6.7
|
||||
github.com/envoyproxy/protoc-gen-validate v0.6.8
|
||||
github.com/fsnotify/fsnotify v1.5.4
|
||||
github.com/go-kit/log v0.2.1
|
||||
github.com/go-logfmt/logfmt v0.5.1
|
||||
github.com/go-openapi/strfmt v0.21.2
|
||||
github.com/go-zookeeper/zk v1.0.2
|
||||
github.com/go-openapi/strfmt v0.21.3
|
||||
github.com/go-zookeeper/zk v1.0.3
|
||||
github.com/gogo/protobuf v1.3.2
|
||||
github.com/golang/snappy v0.0.4
|
||||
github.com/google/pprof v0.0.0-20220608213341-c488b8fa1db3
|
||||
github.com/gophercloud/gophercloud v0.25.0
|
||||
github.com/google/pprof v0.0.0-20220829040838-70bd9ae97f40
|
||||
github.com/gophercloud/gophercloud v1.0.0
|
||||
github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0
|
||||
github.com/hashicorp/consul/api v1.13.0
|
||||
github.com/hashicorp/nomad/api v0.0.0-20220629141207-c2428e1673ec
|
||||
github.com/hetznercloud/hcloud-go v1.35.0
|
||||
github.com/ionos-cloud/sdk-go/v6 v6.1.0
|
||||
github.com/hashicorp/consul/api v1.15.2
|
||||
github.com/hashicorp/nomad/api v0.0.0-20220921012004-ddeeb1040edf
|
||||
github.com/hetznercloud/hcloud-go v1.35.3
|
||||
github.com/ionos-cloud/sdk-go/v6 v6.1.3
|
||||
github.com/json-iterator/go v1.1.12
|
||||
github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b
|
||||
github.com/linode/linodego v1.8.0
|
||||
github.com/kolo/xmlrpc v0.0.0-20220919000247-3377102c83bd
|
||||
github.com/linode/linodego v1.9.3
|
||||
github.com/miekg/dns v1.1.50
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
|
||||
github.com/oklog/run v1.1.0
|
||||
github.com/oklog/ulid v1.3.1
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/alertmanager v0.24.0
|
||||
github.com/prometheus/client_golang v1.12.2
|
||||
github.com/prometheus/client_golang v1.13.0
|
||||
github.com/prometheus/client_model v0.2.0
|
||||
github.com/prometheus/common v0.35.0
|
||||
github.com/prometheus/common v0.37.0
|
||||
github.com/prometheus/common/assets v0.2.0
|
||||
github.com/prometheus/common/sigv4 v0.1.0
|
||||
github.com/prometheus/exporter-toolkit v0.7.1
|
||||
|
@ -50,34 +50,34 @@ require (
|
|||
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
|
||||
github.com/stretchr/testify v1.8.0
|
||||
github.com/vultr/govultr/v2 v2.17.2
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0
|
||||
go.opentelemetry.io/otel v1.7.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.7.0
|
||||
go.opentelemetry.io/otel/sdk v1.7.0
|
||||
go.opentelemetry.io/otel/trace v1.7.0
|
||||
go.uber.org/atomic v1.9.0
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0
|
||||
go.opentelemetry.io/otel v1.10.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.10.0
|
||||
go.opentelemetry.io/otel/sdk v1.10.0
|
||||
go.opentelemetry.io/otel/trace v1.10.0
|
||||
go.uber.org/atomic v1.10.0
|
||||
go.uber.org/automaxprocs v1.5.1
|
||||
go.uber.org/goleak v1.1.12
|
||||
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e
|
||||
golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
|
||||
golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b
|
||||
golang.org/x/time v0.0.0-20220609170525-579cf78fd858
|
||||
golang.org/x/tools v0.1.11
|
||||
google.golang.org/api v0.86.0
|
||||
google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03
|
||||
google.golang.org/grpc v1.47.0
|
||||
google.golang.org/protobuf v1.28.0
|
||||
go.uber.org/goleak v1.2.0
|
||||
golang.org/x/net v0.0.0-20220920203100-d0c6ba3f52d9
|
||||
golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1
|
||||
golang.org/x/sync v0.0.0-20220907140024-f12130a52804
|
||||
golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8
|
||||
golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45
|
||||
golang.org/x/tools v0.1.12
|
||||
google.golang.org/api v0.96.0
|
||||
google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006
|
||||
google.golang.org/grpc v1.49.0
|
||||
google.golang.org/protobuf v1.28.1
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
k8s.io/api v0.24.2
|
||||
k8s.io/apimachinery v0.24.2
|
||||
k8s.io/client-go v0.24.2
|
||||
k8s.io/api v0.25.2
|
||||
k8s.io/apimachinery v0.25.2
|
||||
k8s.io/client-go v0.25.2
|
||||
k8s.io/klog v1.0.0
|
||||
k8s.io/klog/v2 v2.70.0
|
||||
k8s.io/klog/v2 v2.80.0
|
||||
)
|
||||
|
||||
require (
|
||||
|
@ -92,7 +92,7 @@ require (
|
|||
github.com/PuerkitoBio/purell v1.1.1 // indirect
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
|
||||
github.com/armon/go-metrics v0.3.3 // indirect
|
||||
github.com/armon/go-metrics v0.3.10 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
|
||||
|
@ -101,7 +101,7 @@ require (
|
|||
github.com/docker/distribution v2.7.1+incompatible // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.8.0 // indirect
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.3 // indirect
|
||||
|
@ -118,7 +118,6 @@ require (
|
|||
github.com/go-openapi/swag v0.21.1 // indirect
|
||||
github.com/go-openapi/validate v0.21.0 // indirect
|
||||
github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
|
||||
github.com/golang/glog v1.0.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
|
@ -130,17 +129,16 @@ require (
|
|||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.4.0 // indirect
|
||||
github.com/gorilla/websocket v1.4.2 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.2 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 // indirect
|
||||
github.com/hashicorp/cronexpr v1.1.1 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-hclog v0.12.2 // indirect
|
||||
github.com/hashicorp/go-immutable-radix v1.2.0 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/go-hclog v0.14.1 // indirect
|
||||
github.com/hashicorp/go-immutable-radix v1.3.0 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.1 // indirect
|
||||
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.4 // indirect
|
||||
github.com/hashicorp/serf v0.9.6 // indirect
|
||||
github.com/hashicorp/serf v0.9.7 // indirect
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
|
@ -151,38 +149,37 @@ require (
|
|||
github.com/mattn/go-isatty v0.0.14 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.4.3 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/onsi/ginkgo v1.16.4 // indirect
|
||||
github.com/onsi/gomega v1.15.0 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.2 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/procfs v0.7.3 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/sirupsen/logrus v1.8.1 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
go.mongodb.org/mongo-driver v1.8.3 // indirect
|
||||
go.mongodb.org/mongo-driver v1.10.2 // indirect
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v0.30.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v0.16.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v0.32.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.66.4 // indirect
|
||||
gopkg.in/ini.v1 v1.66.6 // indirect
|
||||
gotest.tools/v3 v3.0.3 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
|
||||
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
|
||||
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect
|
||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
)
|
||||
|
||||
|
|
351 go.sum
|
@ -3,6 +3,7 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
|
|||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||
|
@ -14,6 +15,7 @@ cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZ
|
|||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
|
||||
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
|
||||
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
|
||||
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
|
||||
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
|
||||
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
|
||||
|
@ -26,7 +28,6 @@ cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW
|
|||
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
|
||||
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
|
||||
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
|
||||
cloud.google.com/go v0.102.0 h1:DAq3r8y4mDgyB/ZPJ9v/5VJNqjgJAxTn6ZYLlUywOu8=
|
||||
cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
|
@ -53,6 +54,7 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
|
|||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
|
||||
cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw=
|
||||
|
@ -61,13 +63,11 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOEl
|
|||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
|
||||
github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A=
|
||||
github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
|
||||
github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM=
|
||||
github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.21/go.mod h1:zua7mBUaCc5YnSLKYgGJR/w5ePdMDA6H56upLsHzA9U=
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
|
||||
|
@ -87,7 +87,6 @@ github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3
|
|||
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
|
||||
github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
|
||||
github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
|
@ -110,13 +109,11 @@ github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb
|
|||
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-metrics v0.3.3 h1:a9F4rlj7EWWrbj7BYw8J8+x+ZZkJeqzNyRk8hdPF+ro=
|
||||
github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
|
||||
github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo=
|
||||
github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ=
|
||||
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||
|
@ -124,8 +121,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ
|
|||
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.43.11/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
|
||||
github.com/aws/aws-sdk-go v1.44.47 h1:uyiNvoR4wfZ8Bp4ghgbyzGFIg5knjZMUAd5S9ba9qNU=
|
||||
github.com/aws/aws-sdk-go v1.44.47/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
|
||||
github.com/aws/aws-sdk-go v1.44.109 h1:+Na5JPeS0kiEHoBp5Umcuuf+IDqXqD0lXnM920E31YI=
|
||||
github.com/aws/aws-sdk-go v1.44.109/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
|
@ -133,24 +130,19 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
|||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
||||
github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
|
||||
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
|
||||
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
||||
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
|
||||
|
@ -183,17 +175,15 @@ github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgz
|
|||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 h1:9cOfvEwjQxdwKuNDTQSaMKNRvwKwgZG+U4HrjeRKHso=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/digitalocean/godo v1.81.0 h1:sjb3fOfPfSlUQUK22E87BcI8Zx2qtnF7VUCCO4UK3C8=
|
||||
github.com/digitalocean/godo v1.81.0/go.mod h1:BPCqvwbjbGqxuUnIKB4EvS/AX7IDnNmt5fwvIkWo+ew=
|
||||
github.com/digitalocean/godo v1.84.1 h1:VgPsuxhrO9pUygvij6qOhqXfAkxAsDZYRpmjSDMEaHo=
|
||||
github.com/digitalocean/godo v1.84.1/go.mod h1:BPCqvwbjbGqxuUnIKB4EvS/AX7IDnNmt5fwvIkWo+ew=
|
||||
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
|
||||
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
|
||||
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
|
||||
github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v20.10.18+incompatible h1:SN84VYXTBNGn92T/QwIRPlum9zfemfitN7pbsp26WSc=
|
||||
github.com/docker/docker v20.10.18+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||
|
@ -204,10 +194,8 @@ github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFP
|
|||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
|
||||
github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw=
|
||||
github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
|
@ -221,8 +209,9 @@ github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.
|
|||
github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY=
|
||||
github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.6.7 h1:qcZcULcd/abmQg6dwigimCNEyi4gg31M/xaciQlDml8=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.6.8 h1:B2cR/FAaiMtYDHv5BQpaqtkjGuWQIgr2KQZtHQ7f6i8=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.6.8/go.mod h1:0ZMblUx0cxNoWRswEEXoj9kHBmqX8pxGweMiyIAfR6A=
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
|
@ -230,18 +219,13 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL
|
|||
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
|
||||
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
|
||||
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
|
||||
github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
|
||||
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
|
||||
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
|
||||
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
|
||||
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
|
||||
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
|
||||
github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
|
||||
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
|
@ -274,8 +258,6 @@ github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpX
|
|||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
|
||||
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
|
||||
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
|
||||
github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs=
|
||||
github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
|
||||
github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlDW0=
|
||||
|
@ -285,10 +267,10 @@ github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1
|
|||
github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
|
||||
github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
|
||||
github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
|
||||
github.com/go-openapi/strfmt v0.21.2 h1:5NDNgadiX1Vhemth/TH4gCGopWSTdDjxl60H3B7f+os=
|
||||
github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
|
||||
github.com/go-openapi/strfmt v0.21.3 h1:xwhj5X6CjXEZZHMWy1zKJxvW9AfHC9pkyUjLvHtKG7o=
|
||||
github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
||||
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
||||
github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU=
|
||||
github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
||||
|
@ -304,9 +286,8 @@ github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG
|
|||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
|
||||
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/go-zookeeper/zk v1.0.2 h1:4mx0EYENAdX/B/rbunjlt5+4RTA/a9SMHBRuSKdGxPM=
|
||||
github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
|
||||
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
|
||||
github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
|
||||
github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
|
||||
github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
|
||||
github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
|
||||
|
@ -386,7 +367,6 @@ github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
|
|||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
|
||||
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
|
@ -407,7 +387,6 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
|
|||
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
|
||||
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
|
@ -423,13 +402,14 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf
|
|||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20220608213341-c488b8fa1db3 h1:mpL/HvfIgIejhVwAfxBQkwEjlhP5o0O9RAeTAjpwzxc=
|
||||
github.com/google/pprof v0.0.0-20220608213341-c488b8fa1db3/go.mod h1:gSuNB+gJaOiQKLEZ+q+PK9Mq3SOzhRcw2GsGS/FhYDk=
|
||||
github.com/google/pprof v0.0.0-20220829040838-70bd9ae97f40 h1:ykKxL12NZd3JmWZnyqarJGsF73M9Xhtrik/FEtEeFRE=
|
||||
github.com/google/pprof v0.0.0-20220829040838-70bd9ae97f40/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
|
@ -448,33 +428,32 @@ github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99
|
|||
github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk=
|
||||
github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
|
||||
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
|
||||
github.com/gophercloud/gophercloud v0.25.0 h1:C3Oae7y0fUVQGSsBrb3zliAjdX+riCSEh4lNMejFNI4=
|
||||
github.com/gophercloud/gophercloud v0.25.0/go.mod h1:Q8fZtyi5zZxPS/j9aj3sSxtvj41AdQMDwyo1myduD5c=
|
||||
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
||||
github.com/gophercloud/gophercloud v1.0.0 h1:9nTGx0jizmHxDobe4mck89FyQHVyA3CaXLIUSGJjP9k=
|
||||
github.com/gophercloud/gophercloud v1.0.0/go.mod h1:Q8fZtyi5zZxPS/j9aj3sSxtvj41AdQMDwyo1myduD5c=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 h1:uirlL/j72L93RhV4+mkWhjv0cov2I0MIgPOG9rMDr1k=
|
||||
github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.2 h1:ERKrevVTnCw3Wu4I3mtR15QU3gtWy86cBo6De0jEohg=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.2/go.mod h1:chrfS3YoLAlKTRE5cFWvCbt8uGAjshktT4PveTUpsFQ=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1 h1:/sDbPb60SusIXjiJGYLUoS/rAQurQmvGWmwn2bBPM9c=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.1/go.mod h1:G+WkljZi4mflcqVxYSgvt8MNctRQHjEH8ubKtt1Ka3w=
|
||||
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
|
||||
github.com/hashicorp/consul/api v1.13.0 h1:2hnLQ0GjQvw7f3O61jMO8gbasZviZTrt9R8WzgiirHc=
|
||||
github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ=
|
||||
github.com/hashicorp/consul/api v1.15.2 h1:3Q/pDqvJ7udgt/60QOOW/p/PeKioQN+ncYzzCdN2av0=
|
||||
github.com/hashicorp/consul/api v1.15.2/go.mod h1:v6nvB10borjOuIwNRZYPZiHKrTM/AyrGtd0WVVodKM8=
|
||||
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||
github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU=
|
||||
github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
|
||||
github.com/hashicorp/consul/sdk v0.11.0 h1:HRzj8YSCln2yGgCumN5CL8lYlD3gBurnervJRJAZyC4=
|
||||
github.com/hashicorp/consul/sdk v0.11.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw=
|
||||
github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c=
|
||||
github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
|
||||
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
||||
|
@ -485,13 +464,14 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n
|
|||
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
|
||||
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
|
||||
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-hclog v0.12.2 h1:F1fdYblUEsxKiailtkhCCG2g4bipEgaHiDc8vffNpD4=
|
||||
github.com/hashicorp/go-hclog v0.12.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU=
|
||||
github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8=
|
||||
github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.0 h1:8exGP7ego3OmkfksihtSouGMZ+hQrhxx+FVELeXpVPE=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
|
||||
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
|
||||
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||
|
@ -507,8 +487,9 @@ github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0S
|
|||
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
|
||||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
|
||||
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
|
@ -522,26 +503,24 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p
|
|||
github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
|
||||
github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM=
|
||||
github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
|
||||
github.com/hashicorp/nomad/api v0.0.0-20220629141207-c2428e1673ec h1:jAF71e0KoaY2LJlRsRxxGz6MNQOG5gTBIc+rklxfNO0=
|
||||
github.com/hashicorp/nomad/api v0.0.0-20220629141207-c2428e1673ec/go.mod h1:jP79oXjopTyH6E8LF0CEMq67STgrlmBRIyijA0tuR5o=
|
||||
github.com/hashicorp/nomad/api v0.0.0-20220921012004-ddeeb1040edf h1:l/EZ57iRPNs8vd8c9qH0dB4Q+IiZHJouLAgxJ5j25tU=
|
||||
github.com/hashicorp/nomad/api v0.0.0-20220921012004-ddeeb1040edf/go.mod h1:Z0U0rpbh4Qlkgqu3iRDcfJBA+r3FgoeD1BfigmZhfzM=
|
||||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||
github.com/hashicorp/serf v0.9.6 h1:uuEX1kLR6aoda1TBttmJQKDLZE1Ob7KN0NPdE7EtCDc=
|
||||
github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
|
||||
github.com/hetznercloud/hcloud-go v1.35.0 h1:sduXOrWM0/sJXwBty7EQd7+RXEJh5+CsAGQmHshChFg=
|
||||
github.com/hetznercloud/hcloud-go v1.35.0/go.mod h1:mepQwR6va27S3UQthaEPGS86jtzSY9xWL1e9dyxXpgA=
|
||||
github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY=
|
||||
github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
|
||||
github.com/hetznercloud/hcloud-go v1.35.3 h1:WCmFAhLRooih2QHAsbCbEdpIHnshQQmrPqsr3rHE1Ow=
|
||||
github.com/hetznercloud/hcloud-go v1.35.3/go.mod h1:mepQwR6va27S3UQthaEPGS86jtzSY9xWL1e9dyxXpgA=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
|
||||
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
|
||||
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
|
||||
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
|
||||
github.com/ionos-cloud/sdk-go/v6 v6.1.0 h1:0EZz5H+t6W23zHt6dgHYkKavr72/30O9nA97E3FZaS4=
|
||||
github.com/ionos-cloud/sdk-go/v6 v6.1.0/go.mod h1:Ox3W0iiEz0GHnfY9e5LmAxwklsxguuNFEUSu0gVRTME=
|
||||
github.com/ionos-cloud/sdk-go/v6 v6.1.3 h1:vb6yqdpiqaytvreM0bsn2pXw+1YDvEk2RKSmBAQvgDQ=
|
||||
github.com/ionos-cloud/sdk-go/v6 v6.1.3/go.mod h1:Ox3W0iiEz0GHnfY9e5LmAxwklsxguuNFEUSu0gVRTME=
|
||||
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
|
@ -574,8 +553,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
|
|||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b h1:iNjcivnc6lhbvJA3LD622NPrUponluJrBWPIwGG/3Bg=
|
||||
github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
|
||||
github.com/kolo/xmlrpc v0.0.0-20220919000247-3377102c83bd h1:b1taQnM42dp3NdiiQwfmM1WyyucHayZSKN5R0PRYWL0=
|
||||
github.com/kolo/xmlrpc v0.0.0-20220919000247-3377102c83bd/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
|
@ -585,7 +564,6 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
|
|||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
|
||||
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
|
@ -594,9 +572,10 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+
|
|||
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
|
||||
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
|
||||
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
|
||||
github.com/linode/linodego v1.8.0 h1:7B2UaWu6C48tZZZrtINWRElAcwzk4TLnL9USjKf3xm0=
|
||||
github.com/linode/linodego v1.8.0/go.mod h1:heqhl91D8QTPVm2k9qZHP78zzbOdTFLXE9NJc3bcc50=
|
||||
github.com/linode/linodego v1.9.3 h1:+lxNZw4avRxhCqGjwfPgQ2PvMT+vOL0OMsTdzixR7hQ=
|
||||
github.com/linode/linodego v1.9.3/go.mod h1:h6AuFR/JpqwwM/vkj7s8KV3iGN8/jxn+zc437F8SZ8w=
|
||||
github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
|
||||
github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
|
||||
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
|
@ -636,7 +615,6 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG
|
|||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
|
||||
github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
|
||||
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
|
||||
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
|
||||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||
|
@ -644,9 +622,9 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F
|
|||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs=
|
||||
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
|
||||
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
|
@ -656,17 +634,14 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
|
|||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
|
||||
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
|
||||
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
|
||||
github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
|
||||
|
@ -675,9 +650,6 @@ github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi
|
|||
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
|
||||
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
||||
github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
|
||||
|
@ -685,19 +657,12 @@ github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DV
|
|||
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
|
||||
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU=
|
||||
github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
|
||||
github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
|
@ -719,7 +684,6 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T
|
|||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
|
||||
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
|
@ -728,12 +692,12 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
|||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
|
||||
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
|
||||
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
|
||||
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
|
||||
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
|
||||
github.com/prometheus/alertmanager v0.24.0 h1:HBWR3lk4uy3ys+naDZthDdV7yEsxpaNeZuUS+hJgrOw=
|
||||
github.com/prometheus/alertmanager v0.24.0/go.mod h1:r6fy/D7FRuZh5YbnX6J3MBY0eI4Pb5yPYS7/bPSXXqI=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
|
@ -744,8 +708,8 @@ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O
|
|||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
|
||||
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
|
||||
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
|
@ -761,8 +725,8 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8
|
|||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.35.0 h1:Eyr+Pw2VymWejHqCugNaQXkAi6KayVNxaHeu6khmFBE=
|
||||
github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
|
||||
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
|
||||
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
|
||||
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
|
||||
|
@ -775,8 +739,9 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
|
|||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
||||
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
|
@ -784,7 +749,6 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
|
|||
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
|
@ -794,6 +758,7 @@ github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9 h1:0roa6gXKgyta64uqh52AQG3wzZX
|
|||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/shoenig/test v0.3.1 h1:dhGZztS6nQuvJ0o0RtUiQHaEO4hhArh/WmWwik3Ols0=
|
||||
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk=
|
||||
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
|
@ -818,6 +783,7 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO
|
|||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
|
||||
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
|
||||
github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
|
||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
|
@ -839,7 +805,6 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
|
|||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
|
||||
|
@ -852,7 +817,9 @@ github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZ
|
|||
github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI=
|
||||
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
|
||||
github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
|
||||
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
|
||||
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
|
||||
github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
|
||||
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
|
||||
|
@ -862,12 +829,15 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
|
|||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
||||
go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
|
||||
go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
|
||||
go.mongodb.org/mongo-driver v1.8.3 h1:TDKlTkGDKm9kkJVUOAXDK5/fkqKHJVwYQSpoRfB43R4=
|
||||
go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY=
|
||||
go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
|
||||
go.mongodb.org/mongo-driver v1.10.2 h1:4Wk3cnqOrQCn0P92L3/mmurMxzdvWWs5J9jinAVKD+k=
|
||||
go.mongodb.org/mongo-driver v1.10.2/go.mod h1:z4XpeoU6w+9Vht+jAFyLgVrD+jGSQQe0+CBWFHNiHt8=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
|
@ -878,36 +848,37 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0 h1:mac9BKRqwaX6zxHPDe3pvmWpwuuIM0vuXv2juCnQevE=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0/go.mod h1:5eCOqeGphOyz6TsY3ZDNjE33SM/TFAK3RGuCL2naTgY=
|
||||
go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM=
|
||||
go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0 h1:7Yxsak1q4XrJ5y7XBnNwqWx9amMZvoidCctv62XOQ6Y=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0/go.mod h1:M1hVZHNxcbkAlcvrOMlpQ4YOO3Awf+4N2dxkZL3xm04=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0 h1:cMDtmgJ5FpRvqx9x2Aq+Mm0O6K/zcUkH73SFz20TuBw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0/go.mod h1:ceUgdyfNv4h4gLxHR0WNfDiiVmZFodZhZSbOLhpxqXE=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0 h1:MFAyzUPrTwLOwCi+cltN0ZVyy4phU41lwH+lyMyQTS4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0/go.mod h1:E+/KKhwOSw8yoPxSSuUHG6vKppkvhN+S1Jc7Nib3k3o=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.7.0 h1:pLP0MH4MAqeTEV0g/4flxw9O8Is48uAIauAnjznbW50=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.7.0/go.mod h1:aFXT9Ng2seM9eizF+LfKiyPBGy8xIZKwhusC1gIu3hA=
|
||||
go.opentelemetry.io/otel/metric v0.30.0 h1:Hs8eQZ8aQgs0U49diZoaS6Uaxw3+bBE3lcMUKBFIk3c=
|
||||
go.opentelemetry.io/otel/metric v0.30.0/go.mod h1:/ShZ7+TS4dHzDFmfi1kSXMhMVubNoP0oIaBp70J6UXU=
|
||||
go.opentelemetry.io/otel/sdk v1.7.0 h1:4OmStpcKVOfvDOgCt7UriAPtKolwIhxpnSNI/yK+1B0=
|
||||
go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU=
|
||||
go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o=
|
||||
go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0 h1:qZ3KzA4qPzLBDtQyPk4ydjlg8zvXbNysnFHaVMKJbVo=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0/go.mod h1:14Oo79mRwusSI02L0EfG3Gp1uF3+1wSL+D4zDysxyqs=
|
||||
go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4=
|
||||
go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.10.0 h1:S8DedULB3gp93Rh+9Z+7NTEv+6Id/KYS7LDyipZ9iCE=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.10.0/go.mod h1:5WV40MLWwvWlGP7Xm8g3pMcg0pKOUY609qxJn8y7LmM=
|
||||
go.opentelemetry.io/otel/metric v0.32.0 h1:lh5KMDB8xlMM4kwE38vlZJ3rZeiWrjw3As1vclfC01k=
|
||||
go.opentelemetry.io/otel/metric v0.32.0/go.mod h1:PVDNTt297p8ehm949jsIzd+Z2bIZJYQQG/uuHTeWFHY=
|
||||
go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY=
|
||||
go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE=
|
||||
go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E=
|
||||
go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
|
||||
go.opentelemetry.io/proto/otlp v0.16.0 h1:WHzDWdXUvbc5bG2ObdrGfaNpQz7ft7QN9HHmJlbiB1E=
|
||||
go.opentelemetry.io/proto/otlp v0.16.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
|
||||
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
|
||||
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
|
||||
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
|
||||
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/automaxprocs v1.5.1 h1:e1YG66Lrk73dn4qhg8WFSvhF0JuFQF0ERIp4rpuV8Qk=
|
||||
go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU=
|
||||
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
|
||||
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
|
||||
go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||
|
@ -925,15 +896,15 @@ golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3
|
|||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 h1:Tgea0cVUD0ivh5ADBX4WwuI12DUd2to3nCYe2eayMIw=
|
||||
golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
|
@ -944,6 +915,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
|||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
|
@ -994,7 +967,6 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
|
|||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
|
@ -1006,7 +978,6 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
|
|||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
|
@ -1014,27 +985,32 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
|
|||
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
|
||||
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
|
||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ=
|
||||
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.0.0-20220907135653-1e95f45603a7/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
||||
golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
||||
golang.org/x/net v0.0.0-20220920203100-d0c6ba3f52d9 h1:asZqf0wXastQr+DudYagQS8uBO8bHKeYD1vbAvGmFL8=
|
||||
golang.org/x/net v0.0.0-20220920203100-d0c6ba3f52d9/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -1055,9 +1031,9 @@ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j
|
|||
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
|
||||
golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
|
||||
golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26 h1:uBgVQYJLi/m8M0wzp+aGwBWt90gMRoOVf+aWTW10QHI=
|
||||
golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
|
||||
golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
|
||||
golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 h1:lxqLZaMad/dJHMFZH0NiNpiEZI/nhgWhe4wgzpE+MuA=
|
||||
golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -1070,8 +1046,10 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8=
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220907140024-f12130a52804 h1:0SH2R3f1b1VmIMG7BXbEZCBUu2dKmHschSmjqGUrW8A=
|
||||
golang.org/x/sync v0.0.0-20220907140024-f12130a52804/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -1094,14 +1072,11 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -1109,7 +1084,6 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -1119,7 +1093,6 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -1128,10 +1101,10 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -1160,16 +1133,17 @@ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b h1:2n253B2r0pYSmEV+UNCQoPfU/FiaizQEK5Gu4Bq4JE8=
|
||||
golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908150016-7ac13a9a928d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 h1:h+EGohizhe9XlX18rfpa8k8RAc5XyaeamM+0VHRd4lc=
|
||||
golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
|
||||
|
@ -1188,9 +1162,8 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb
|
|||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U=
|
||||
golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45 h1:yuLAip3bfURHClMG9VBdzPrQvCWjWiWUTBGV+/fCbUs=
|
||||
golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
|
@ -1237,7 +1210,6 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs
|
|||
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
|
@ -1247,9 +1219,9 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u
|
|||
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
|
@ -1258,16 +1230,17 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
|||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.11 h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY=
|
||||
golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
|
||||
golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0=
|
||||
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
|
@ -1306,8 +1279,8 @@ google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69
|
|||
google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
|
||||
google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
|
||||
google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
|
||||
google.golang.org/api v0.86.0 h1:ZAnyOHQFIuWso1BodVfSaRyffD74T9ERGFa3k1fNk/U=
|
||||
google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
|
||||
google.golang.org/api v0.96.0 h1:F60cuQPJq7K7FzsxMYHAUJSiXh2oKctHxBMbDygxhfM=
|
||||
google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
@ -1352,7 +1325,9 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D
|
|||
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
|
@ -1393,13 +1368,12 @@ google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX
|
|||
google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
|
||||
google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||
google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||
google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||
google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||
google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03 h1:W70HjnmXFJm+8RNjOpIDYW2nKsSi/af0VvIZUtYkwuU=
|
||||
google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 h1:mmbq5q8M1t7dhkLw320YK4PsOXm6jdnUAkErImaIqOg=
|
||||
google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
|
@ -1435,8 +1409,9 @@ google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ5
|
|||
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
|
||||
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
|
||||
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
|
||||
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
|
@ -1451,8 +1426,9 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
|
|||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
@ -1467,11 +1443,10 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy
|
|||
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4=
|
||||
gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI=
|
||||
gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/telebot.v3 v3.0.0/go.mod h1:7rExV8/0mDDNu9epSrDm/8j22KLaActH1Tbee6YjzWg=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
|
@ -1501,28 +1476,24 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
|
|||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
k8s.io/api v0.24.2 h1:g518dPU/L7VRLxWfcadQn2OnsiGWVOadTLpdnqgY2OI=
|
||||
k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg=
|
||||
k8s.io/apimachinery v0.24.2 h1:5QlH9SL2C8KMcrNJPor+LbXVTaZRReml7svPEh4OKDM=
|
||||
k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
|
||||
k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA=
|
||||
k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30=
|
||||
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU=
|
||||
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=
|
||||
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc=
|
||||
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/api v0.25.2 h1:v6G8RyFcwf0HR5jQGIAYlvtRNrxMJQG1xJzaSeVnIS8=
|
||||
k8s.io/api v0.25.2/go.mod h1:qP1Rn4sCVFwx/xIhe+we2cwBLTXNcheRyYXwajonhy0=
|
||||
k8s.io/apimachinery v0.25.2 h1:WbxfAjCx+AeN8Ilp9joWnyJ6xu9OMeS/fsfjK/5zaQs=
|
||||
k8s.io/apimachinery v0.25.2/go.mod h1:hqqA1X0bsgsxI6dXsJ4HnNTBOmJNxyPp8dw3u2fSHwA=
|
||||
k8s.io/client-go v0.25.2 h1:SUPp9p5CwM0yXGQrwYurw9LWz+YtMwhWd0GqOsSiefo=
|
||||
k8s.io/client-go v0.25.2/go.mod h1:i7cNU7N+yGQmJkewcRD2+Vuj4iz7b30kI8OcL3horQ4=
|
||||
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA=
|
||||
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU=
|
||||
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4=
|
||||
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=
|
||||
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
|
||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
|
||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
|
||||
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
|
||||
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
|
||||
|
|
|
@ -333,6 +333,11 @@ func (ls Labels) Map() map[string]string {
|
|||
return m
|
||||
}
|
||||
|
||||
// EmptyLabels returns an empty Labels value, for convenience.
|
||||
func EmptyLabels() Labels {
|
||||
return Labels{}
|
||||
}
|
||||
|
||||
// New returns a sorted Labels from the given labels.
|
||||
// The caller has to guarantee that all label names are unique.
|
||||
func New(ls ...Label) Labels {
|
||||
|
@ -467,17 +472,25 @@ func (b *Builder) Set(n, v string) *Builder {
|
|||
return b
|
||||
}
|
||||
|
||||
// Labels returns the labels from the builder. If no modifications
|
||||
// were made, the original labels are returned.
|
||||
func (b *Builder) Labels() Labels {
|
||||
// Labels returns the labels from the builder, adding them to res if non-nil.
|
||||
// Argument res can be the same as b.base, if the caller wants to overwrite that slice.
|
||||
// If no modifications were made, the original labels are returned.
|
||||
func (b *Builder) Labels(res Labels) Labels {
|
||||
if len(b.del) == 0 && len(b.add) == 0 {
|
||||
return b.base
|
||||
}
|
||||
|
||||
// In the general case, labels are removed, modified or moved
|
||||
// rather than added.
|
||||
res := make(Labels, 0, len(b.base))
|
||||
if res == nil {
|
||||
// In the general case, labels are removed, modified or moved
|
||||
// rather than added.
|
||||
res = make(Labels, 0, len(b.base))
|
||||
} else {
|
||||
res = res[:0]
|
||||
}
|
||||
Outer:
|
||||
// Justification that res can be the same slice as base: in this loop
|
||||
// we move forward through base, and either skip an element or assign
|
||||
// it to res at its current position or an earlier position.
|
||||
for _, l := range b.base {
|
||||
for _, n := range b.del {
|
||||
if l.Name == n {
|
||||
|
|
|
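The two labels.go changes above — the new EmptyLabels() helper and the Builder.Labels(res) signature that can reuse a caller-supplied slice — combine as in the following sketch. This code is illustrative only, not part of the commit; it uses just the identifiers visible in the diff (FromStrings, NewBuilder, Set, Del, EmptyLabels).

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	base := labels.FromStrings("instance", "0", "job", "app-server", "tmp_hash", "2")

	b := labels.NewBuilder(base)
	b.Del("tmp_hash")
	b.Set("env", "prod")

	// Passing nil keeps the previous behaviour: a fresh slice is allocated.
	fresh := b.Labels(nil)

	// Passing base reuses its backing array, as the justification comment in
	// the diff explains; base itself must not be used again after this call.
	reused := b.Labels(base)

	fmt.Println(fresh)
	fmt.Println(reused)
	fmt.Println(labels.EmptyLabels()) // prints {}
}
```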
@ -14,11 +14,13 @@
|
|||
package labels
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
func TestLabels_String(t *testing.T) {
|
||||
|
@ -27,16 +29,7 @@ func TestLabels_String(t *testing.T) {
|
|||
expected string
|
||||
}{
|
||||
{
|
||||
lables: Labels{
|
||||
{
|
||||
Name: "t1",
|
||||
Value: "t1",
|
||||
},
|
||||
{
|
||||
Name: "t2",
|
||||
Value: "t2",
|
||||
},
|
||||
},
|
||||
lables: FromStrings("t1", "t1", "t2", "t2"),
|
||||
expected: "{t1=\"t1\", t2=\"t2\"}",
|
||||
},
|
||||
{
|
||||
|
@ -55,32 +48,13 @@ func TestLabels_String(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestLabels_MatchLabels(t *testing.T) {
|
||||
labels := Labels{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: "ALERTS",
|
||||
},
|
||||
{
|
||||
Name: "alertname",
|
||||
Value: "HTTPRequestRateLow",
|
||||
},
|
||||
{
|
||||
Name: "alertstate",
|
||||
Value: "pending",
|
||||
},
|
||||
{
|
||||
Name: "instance",
|
||||
Value: "0",
|
||||
},
|
||||
{
|
||||
Name: "job",
|
||||
Value: "app-server",
|
||||
},
|
||||
{
|
||||
Name: "severity",
|
||||
Value: "critical",
|
||||
},
|
||||
}
|
||||
labels := FromStrings(
|
||||
"__name__", "ALERTS",
|
||||
"alertname", "HTTPRequestRateLow",
|
||||
"alertstate", "pending",
|
||||
"instance", "0",
|
||||
"job", "app-server",
|
||||
"severity", "critical")
|
||||
|
||||
tests := []struct {
|
||||
providedNames []string
|
||||
|
@ -96,24 +70,11 @@ func TestLabels_MatchLabels(t *testing.T) {
|
|||
"instance",
|
||||
},
|
||||
on: true,
|
||||
expected: Labels{
|
||||
{
|
||||
Name: "__name__",
|
||||
Value: "ALERTS",
|
||||
},
|
||||
{
|
||||
Name: "alertname",
|
||||
Value: "HTTPRequestRateLow",
|
||||
},
|
||||
{
|
||||
Name: "alertstate",
|
||||
Value: "pending",
|
||||
},
|
||||
{
|
||||
Name: "instance",
|
||||
Value: "0",
|
||||
},
|
||||
},
|
||||
expected: FromStrings(
|
||||
"__name__", "ALERTS",
|
||||
"alertname", "HTTPRequestRateLow",
|
||||
"alertstate", "pending",
|
||||
"instance", "0"),
|
||||
},
|
||||
// on = false, explicitly excluding metric name from matching.
|
||||
{
|
||||
|
@ -124,16 +85,9 @@ func TestLabels_MatchLabels(t *testing.T) {
|
|||
"instance",
|
||||
},
|
||||
on: false,
|
||||
expected: Labels{
|
||||
{
|
||||
Name: "job",
|
||||
Value: "app-server",
|
||||
},
|
||||
{
|
||||
Name: "severity",
|
||||
Value: "critical",
|
||||
},
|
||||
},
|
||||
expected: FromStrings(
|
||||
"job", "app-server",
|
||||
"severity", "critical"),
|
||||
},
|
||||
// on = true, explicitly excluding metric name from matching.
|
||||
{
|
||||
|
@ -143,20 +97,10 @@ func TestLabels_MatchLabels(t *testing.T) {
|
|||
"instance",
|
||||
},
|
||||
on: true,
|
||||
expected: Labels{
|
||||
{
|
||||
Name: "alertname",
|
||||
Value: "HTTPRequestRateLow",
|
||||
},
|
||||
{
|
||||
Name: "alertstate",
|
||||
Value: "pending",
|
||||
},
|
||||
{
|
||||
Name: "instance",
|
||||
Value: "0",
|
||||
},
|
||||
},
|
||||
expected: FromStrings(
|
||||
"alertname", "HTTPRequestRateLow",
|
||||
"alertstate", "pending",
|
||||
"instance", "0"),
|
||||
},
|
||||
// on = false, implicitly excluding metric name from matching.
|
||||
{
|
||||
|
@ -166,16 +110,9 @@ func TestLabels_MatchLabels(t *testing.T) {
|
|||
"instance",
|
||||
},
|
||||
on: false,
|
||||
expected: Labels{
|
||||
{
|
||||
Name: "job",
|
||||
Value: "app-server",
|
||||
},
|
||||
{
|
||||
Name: "severity",
|
||||
Value: "critical",
|
||||
},
|
||||
},
|
||||
expected: FromStrings(
|
||||
"job", "app-server",
|
||||
"severity", "critical"),
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -195,10 +132,7 @@ func TestLabels_HasDuplicateLabelNames(t *testing.T) {
|
|||
Input: FromMap(map[string]string{"__name__": "up", "hostname": "localhost"}),
|
||||
Duplicate: false,
|
||||
}, {
|
||||
Input: append(
|
||||
FromMap(map[string]string{"__name__": "up", "hostname": "localhost"}),
|
||||
FromMap(map[string]string{"hostname": "127.0.0.1"})...,
|
||||
),
|
||||
Input: FromStrings("__name__", "up", "hostname", "localhost", "hostname", "127.0.0.1"),
|
||||
Duplicate: true,
|
||||
LabelName: "hostname",
|
||||
},
|
||||
|
@ -217,73 +151,63 @@ func TestLabels_WithoutEmpty(t *testing.T) {
|
|||
expected Labels
|
||||
}{
|
||||
{
|
||||
input: Labels{
|
||||
{Name: "foo"},
|
||||
{Name: "bar"},
|
||||
},
|
||||
expected: Labels{},
|
||||
input: FromStrings(
|
||||
"foo", "",
|
||||
"bar", ""),
|
||||
expected: EmptyLabels(),
|
||||
},
|
||||
{
|
||||
input: Labels{
|
||||
{Name: "foo"},
|
||||
{Name: "bar"},
|
||||
{Name: "baz"},
|
||||
},
|
||||
expected: Labels{},
|
||||
input: FromStrings(
|
||||
"foo", "",
|
||||
"bar", "",
|
||||
"baz", ""),
|
||||
expected: EmptyLabels(),
|
||||
},
|
||||
{
|
||||
input: Labels{
|
||||
{Name: "__name__", Value: "test"},
|
||||
{Name: "hostname", Value: "localhost"},
|
||||
{Name: "job", Value: "check"},
|
||||
},
|
||||
expected: Labels{
|
||||
{Name: "__name__", Value: "test"},
|
||||
{Name: "hostname", Value: "localhost"},
|
||||
{Name: "job", Value: "check"},
|
||||
},
|
||||
input: FromStrings(
|
||||
"__name__", "test",
|
||||
"hostname", "localhost",
|
||||
"job", "check"),
|
||||
expected: FromStrings(
|
||||
"__name__", "test",
|
||||
"hostname", "localhost",
|
||||
"job", "check"),
|
||||
},
|
||||
{
|
||||
input: Labels{
|
||||
{Name: "__name__", Value: "test"},
|
||||
{Name: "hostname", Value: "localhost"},
|
||||
{Name: "bar"},
|
||||
{Name: "job", Value: "check"},
|
||||
},
|
||||
expected: Labels{
|
||||
{Name: "__name__", Value: "test"},
|
||||
{Name: "hostname", Value: "localhost"},
|
||||
{Name: "job", Value: "check"},
|
||||
},
|
||||
input: FromStrings(
|
||||
"__name__", "test",
|
||||
"hostname", "localhost",
|
||||
"bar", "",
|
||||
"job", "check"),
|
||||
expected: FromStrings(
|
||||
"__name__", "test",
|
||||
"hostname", "localhost",
|
||||
"job", "check"),
|
||||
},
|
||||
{
|
||||
input: Labels{
|
||||
{Name: "__name__", Value: "test"},
|
||||
{Name: "foo"},
|
||||
{Name: "hostname", Value: "localhost"},
|
||||
{Name: "bar"},
|
||||
{Name: "job", Value: "check"},
|
||||
},
|
||||
expected: Labels{
|
||||
{Name: "__name__", Value: "test"},
|
||||
{Name: "hostname", Value: "localhost"},
|
||||
{Name: "job", Value: "check"},
|
||||
},
|
||||
input: FromStrings(
|
||||
"__name__", "test",
|
||||
"foo", "",
|
||||
"hostname", "localhost",
|
||||
"bar", "",
|
||||
"job", "check"),
|
||||
expected: FromStrings(
|
||||
"__name__", "test",
|
||||
"hostname", "localhost",
|
||||
"job", "check"),
|
||||
},
|
||||
{
|
||||
input: Labels{
|
||||
{Name: "__name__", Value: "test"},
|
||||
{Name: "foo"},
|
||||
{Name: "baz"},
|
||||
{Name: "hostname", Value: "localhost"},
|
||||
{Name: "bar"},
|
||||
{Name: "job", Value: "check"},
|
||||
},
|
||||
expected: Labels{
|
||||
{Name: "__name__", Value: "test"},
|
||||
{Name: "hostname", Value: "localhost"},
|
||||
{Name: "job", Value: "check"},
|
||||
},
|
||||
input: FromStrings(
|
||||
"__name__", "test",
|
||||
"foo", "",
|
||||
"baz", "",
|
||||
"hostname", "localhost",
|
||||
"bar", "",
|
||||
"job", "check"),
|
||||
expected: FromStrings(
|
||||
"__name__", "test",
|
||||
"hostname", "localhost",
|
||||
"job", "check"),
|
||||
},
|
||||
} {
|
||||
t.Run("", func(t *testing.T) {
|
||||
|
@ -293,75 +217,37 @@ func TestLabels_WithoutEmpty(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestLabels_Equal(t *testing.T) {
|
||||
labels := Labels{
|
||||
{
|
||||
Name: "aaa",
|
||||
Value: "111",
|
||||
},
|
||||
{
|
||||
Name: "bbb",
|
||||
Value: "222",
|
||||
},
|
||||
}
|
||||
labels := FromStrings(
|
||||
"aaa", "111",
|
||||
"bbb", "222")
|
||||
|
||||
tests := []struct {
|
||||
compared Labels
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
compared: Labels{
|
||||
{
|
||||
Name: "aaa",
|
||||
Value: "111",
|
||||
},
|
||||
{
|
||||
Name: "bbb",
|
||||
Value: "222",
|
||||
},
|
||||
{
|
||||
Name: "ccc",
|
||||
Value: "333",
|
||||
},
|
||||
},
|
||||
compared: FromStrings(
|
||||
"aaa", "111",
|
||||
"bbb", "222",
|
||||
"ccc", "333"),
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
compared: Labels{
|
||||
{
|
||||
Name: "aaa",
|
||||
Value: "111",
|
||||
},
|
||||
{
|
||||
Name: "bar",
|
||||
Value: "222",
|
||||
},
|
||||
},
|
||||
compared: FromStrings(
|
||||
"aaa", "111",
|
||||
"bar", "222"),
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
compared: Labels{
|
||||
{
|
||||
Name: "aaa",
|
||||
Value: "111",
|
||||
},
|
||||
{
|
||||
Name: "bbb",
|
||||
Value: "233",
|
||||
},
|
||||
},
|
||||
compared: FromStrings(
|
||||
"aaa", "111",
|
||||
"bbb", "233"),
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
compared: Labels{
|
||||
{
|
||||
Name: "aaa",
|
||||
Value: "111",
|
||||
},
|
||||
{
|
||||
Name: "bbb",
|
||||
Value: "222",
|
||||
},
|
||||
},
|
||||
compared: FromStrings(
|
||||
"aaa", "111",
|
||||
"bbb", "222"),
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
@ -391,121 +277,72 @@ func TestLabels_FromStrings(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestLabels_Compare(t *testing.T) {
|
||||
labels := Labels{
|
||||
{
|
||||
Name: "aaa",
|
||||
Value: "111",
|
||||
},
|
||||
{
|
||||
Name: "bbb",
|
||||
Value: "222",
|
||||
},
|
||||
}
|
||||
labels := FromStrings(
|
||||
"aaa", "111",
|
||||
"bbb", "222")
|
||||
|
||||
tests := []struct {
|
||||
compared Labels
|
||||
expected int
|
||||
}{
|
||||
{
|
||||
compared: Labels{
|
||||
{
|
||||
Name: "aaa",
|
||||
Value: "110",
|
||||
},
|
||||
{
|
||||
Name: "bbb",
|
||||
Value: "222",
|
||||
},
|
||||
},
|
||||
compared: FromStrings(
|
||||
"aaa", "110",
|
||||
"bbb", "222"),
|
||||
expected: 1,
|
||||
},
|
||||
{
|
||||
compared: Labels{
|
||||
{
|
||||
Name: "aaa",
|
||||
Value: "111",
|
||||
},
|
||||
{
|
||||
Name: "bbb",
|
||||
Value: "233",
|
||||
},
|
||||
},
|
||||
compared: FromStrings(
|
||||
"aaa", "111",
|
||||
"bbb", "233"),
|
||||
expected: -1,
|
||||
},
|
||||
{
|
||||
compared: Labels{
|
||||
{
|
||||
Name: "aaa",
|
||||
Value: "111",
|
||||
},
|
||||
{
|
||||
Name: "bar",
|
||||
Value: "222",
|
||||
},
|
||||
},
|
||||
compared: FromStrings(
|
||||
"aaa", "111",
|
||||
"bar", "222"),
|
||||
expected: 1,
|
||||
},
|
||||
{
|
||||
compared: Labels{
|
||||
{
|
||||
Name: "aaa",
|
||||
Value: "111",
|
||||
},
|
||||
{
|
||||
Name: "bbc",
|
||||
Value: "222",
|
||||
},
|
||||
},
|
||||
compared: FromStrings(
|
||||
"aaa", "111",
|
||||
"bbc", "222"),
|
||||
expected: -1,
|
||||
},
|
||||
{
|
||||
compared: Labels{
|
||||
{
|
||||
Name: "aaa",
|
||||
Value: "111",
|
||||
},
|
||||
},
|
||||
compared: FromStrings(
|
||||
"aaa", "111"),
|
||||
expected: 1,
|
||||
},
|
||||
{
|
||||
compared: Labels{
|
||||
{
|
||||
Name: "aaa",
|
||||
Value: "111",
|
||||
},
|
||||
{
|
||||
Name: "bbb",
|
||||
Value: "222",
|
||||
},
|
||||
{
|
||||
Name: "ccc",
|
||||
Value: "333",
|
||||
},
|
||||
{
|
||||
Name: "ddd",
|
||||
Value: "444",
|
||||
},
|
||||
},
|
||||
compared: FromStrings(
|
||||
"aaa", "111",
|
||||
"bbb", "222",
|
||||
"ccc", "333",
|
||||
"ddd", "444"),
|
||||
expected: -2,
|
||||
},
|
||||
{
|
||||
compared: Labels{
|
||||
{
|
||||
Name: "aaa",
|
||||
Value: "111",
|
||||
},
|
||||
{
|
||||
Name: "bbb",
|
||||
Value: "222",
|
||||
},
|
||||
},
|
||||
compared: FromStrings(
|
||||
"aaa", "111",
|
||||
"bbb", "222"),
|
||||
expected: 0,
|
||||
},
|
||||
}
|
||||
|
||||
sign := func(a int) int {
|
||||
switch {
|
||||
case a < 0:
|
||||
return -1
|
||||
case a > 0:
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
got := Compare(labels, test.compared)
|
||||
require.Equal(t, test.expected, got, "unexpected comparison result for test case %d", i)
|
||||
require.Equal(t, sign(test.expected), sign(got), "unexpected comparison result for test case %d", i)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -524,16 +361,9 @@ func TestLabels_Has(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
labelsSet := Labels{
|
||||
{
|
||||
Name: "aaa",
|
||||
Value: "111",
|
||||
},
|
||||
{
|
||||
Name: "bbb",
|
||||
Value: "222",
|
||||
},
|
||||
}
|
||||
labelsSet := FromStrings(
|
||||
"aaa", "111",
|
||||
"bbb", "222")
|
||||
|
||||
for i, test := range tests {
|
||||
got := labelsSet.Has(test.input)
|
||||
|
@ -542,8 +372,8 @@ func TestLabels_Has(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestLabels_Get(t *testing.T) {
|
||||
require.Equal(t, "", Labels{{"aaa", "111"}, {"bbb", "222"}}.Get("foo"))
|
||||
require.Equal(t, "111", Labels{{"aaa", "111"}, {"bbb", "222"}}.Get("aaa"))
|
||||
require.Equal(t, "", FromStrings("aaa", "111", "bbb", "222").Get("foo"))
|
||||
require.Equal(t, "111", FromStrings("aaa", "111", "bbb", "222").Get("aaa"))
|
||||
}
|
||||
|
||||
// BenchmarkLabels_Get was written to check whether a binary search can improve the performance vs the linear search implementation
|
||||
|
@ -561,19 +391,19 @@ func TestLabels_Get(t *testing.T) {
|
|||
// Labels_Get/with_30_labels/get_last_label 169ns ± 0% 29ns ± 0% ~ (p=1.000 n=1+1)
|
||||
func BenchmarkLabels_Get(b *testing.B) {
|
||||
maxLabels := 30
|
||||
allLabels := make(Labels, maxLabels)
|
||||
allLabels := make([]Label, maxLabels)
|
||||
for i := 0; i < maxLabels; i++ {
|
||||
allLabels[i] = Label{Name: strings.Repeat(string('a'+byte(i)), 5)}
|
||||
}
|
||||
for _, size := range []int{5, 10, maxLabels} {
|
||||
b.Run(fmt.Sprintf("with %d labels", size), func(b *testing.B) {
|
||||
labels := allLabels[:size]
|
||||
labels := New(allLabels[:size]...)
|
||||
for _, scenario := range []struct {
|
||||
desc, label string
|
||||
}{
|
||||
{"get first label", labels[0].Name},
|
||||
{"get middle label", labels[size/2].Name},
|
||||
{"get last label", labels[size-1].Name},
|
||||
{"get first label", allLabels[0].Name},
|
||||
{"get middle label", allLabels[size/2].Name},
|
||||
{"get last label", allLabels[size-1].Name},
|
||||
} {
|
||||
b.Run(scenario.desc, func(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
|
@ -593,18 +423,18 @@ func BenchmarkLabels_Equals(b *testing.B) {
|
|||
}{
|
||||
{
|
||||
"equal",
|
||||
Labels{{"a_label_name", "a_label_value"}, {"another_label_name", "another_label_value"}},
|
||||
Labels{{"a_label_name", "a_label_value"}, {"another_label_name", "another_label_value"}},
|
||||
FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
|
||||
FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
|
||||
},
|
||||
{
|
||||
"not equal",
|
||||
Labels{{"a_label_name", "a_label_value"}, {"another_label_name", "another_label_value"}},
|
||||
Labels{{"a_label_name", "a_label_value"}, {"another_label_name", "a_different_label_value"}},
|
||||
FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
|
||||
FromStrings("a_label_name", "a_label_value", "another_label_name", "a_different_label_value"),
|
||||
},
|
||||
{
|
||||
"different sizes",
|
||||
Labels{{"a_label_name", "a_label_value"}, {"another_label_name", "another_label_value"}},
|
||||
Labels{{"a_label_name", "a_label_value"}},
|
||||
FromStrings("a_label_name", "a_label_value", "another_label_name", "another_label_value"),
|
||||
FromStrings("a_label_name", "a_label_value"),
|
||||
},
|
||||
} {
|
||||
b.Run(scenario.desc, func(b *testing.B) {
|
||||
|
@ -617,21 +447,22 @@ func BenchmarkLabels_Equals(b *testing.B) {
|
|||
}
|
||||
|
||||
func TestLabels_Copy(t *testing.T) {
|
||||
require.Equal(t, Labels{{"aaa", "111"}, {"bbb", "222"}}, Labels{{"aaa", "111"}, {"bbb", "222"}}.Copy())
|
||||
require.Equal(t, FromStrings("aaa", "111", "bbb", "222"), FromStrings("aaa", "111", "bbb", "222").Copy())
|
||||
}
|
||||
|
||||
func TestLabels_Map(t *testing.T) {
|
||||
require.Equal(t, map[string]string{"aaa": "111", "bbb": "222"}, Labels{{"aaa", "111"}, {"bbb", "222"}}.Map())
|
||||
require.Equal(t, map[string]string{"aaa": "111", "bbb": "222"}, FromStrings("aaa", "111", "bbb", "222").Map())
|
||||
}
|
||||
|
||||
func TestLabels_BytesWithLabels(t *testing.T) {
|
||||
require.Equal(t, Labels{{"aaa", "111"}, {"bbb", "222"}}.Bytes(nil), Labels{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}}.BytesWithLabels(nil, "aaa", "bbb"))
|
||||
require.Equal(t, Labels{}.Bytes(nil), Labels{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}}.BytesWithLabels(nil))
|
||||
require.Equal(t, FromStrings("aaa", "111", "bbb", "222").Bytes(nil), FromStrings("aaa", "111", "bbb", "222", "ccc", "333").BytesWithLabels(nil, "aaa", "bbb"))
|
||||
require.Equal(t, FromStrings().Bytes(nil), FromStrings("aaa", "111", "bbb", "222", "ccc", "333").BytesWithLabels(nil))
|
||||
}
|
||||
|
||||
func TestLabels_BytesWithoutLabels(t *testing.T) {
|
||||
require.Equal(t, Labels{{"aaa", "111"}}.Bytes(nil), Labels{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}}.BytesWithoutLabels(nil, "bbb", "ccc"))
|
||||
require.Equal(t, Labels{{"aaa", "111"}}.Bytes(nil), Labels{{MetricName, "333"}, {"aaa", "111"}, {"bbb", "222"}}.BytesWithoutLabels(nil, MetricName, "bbb"))
|
||||
require.Equal(t, FromStrings("aaa", "111").Bytes(nil), FromStrings("aaa", "111", "bbb", "222", "ccc", "333").BytesWithoutLabels(nil, "bbb", "ccc"))
|
||||
require.Equal(t, FromStrings(MetricName, "333", "aaa", "111").Bytes(nil), FromStrings(MetricName, "333", "aaa", "111", "bbb", "222").BytesWithoutLabels(nil, "bbb"))
|
||||
require.Equal(t, FromStrings("aaa", "111").Bytes(nil), FromStrings(MetricName, "333", "aaa", "111", "bbb", "222").BytesWithoutLabels(nil, MetricName, "bbb"))
|
||||
}
|
||||
|
||||
func TestBuilder(t *testing.T) {
|
||||
|
@ -709,16 +540,13 @@ func TestBuilder(t *testing.T) {
|
|||
b.Keep(tcase.keep...)
|
||||
}
|
||||
b.Del(tcase.del...)
|
||||
require.Equal(t, tcase.want, b.Labels())
|
||||
require.Equal(t, tcase.want, b.Labels(tcase.base))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLabels_Hash(t *testing.T) {
|
||||
lbls := Labels{
|
||||
{Name: "foo", Value: "bar"},
|
||||
{Name: "baz", Value: "qux"},
|
||||
}
|
||||
lbls := FromStrings("foo", "bar", "baz", "qux")
|
||||
require.Equal(t, lbls.Hash(), lbls.Hash())
|
||||
require.NotEqual(t, lbls.Hash(), Labels{lbls[1], lbls[0]}.Hash(), "unordered labels match.")
|
||||
require.NotEqual(t, lbls.Hash(), Labels{lbls[0]}.Hash(), "different labels match.")
|
||||
|
@ -734,23 +562,23 @@ func BenchmarkLabels_Hash(b *testing.B) {
|
|||
{
|
||||
name: "typical labels under 1KB",
|
||||
lbls: func() Labels {
|
||||
lbls := make(Labels, 10)
|
||||
for i := 0; i < len(lbls); i++ {
|
||||
b := NewBuilder(EmptyLabels())
|
||||
for i := 0; i < 10; i++ {
|
||||
// Label ~20B name, 50B value.
|
||||
lbls[i] = Label{Name: fmt.Sprintf("abcdefghijabcdefghijabcdefghij%d", i), Value: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)}
|
||||
b.Set(fmt.Sprintf("abcdefghijabcdefghijabcdefghij%d", i), fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i))
|
||||
}
|
||||
return lbls
|
||||
return b.Labels(nil)
|
||||
}(),
|
||||
},
|
||||
{
|
||||
name: "bigger labels over 1KB",
|
||||
lbls: func() Labels {
|
||||
lbls := make(Labels, 10)
|
||||
for i := 0; i < len(lbls); i++ {
|
||||
b := NewBuilder(EmptyLabels())
|
||||
for i := 0; i < 10; i++ {
|
||||
// Label ~50B name, 50B value.
|
||||
lbls[i] = Label{Name: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i), Value: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)}
|
||||
b.Set(fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i), fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i))
|
||||
}
|
||||
return lbls
|
||||
return b.Labels(nil)
|
||||
}(),
|
||||
},
|
||||
{
|
||||
|
@ -762,7 +590,7 @@ func BenchmarkLabels_Hash(b *testing.B) {
|
|||
for i := 0; i < lbl.Cap()/len(word); i++ {
|
||||
_, _ = lbl.WriteString(word)
|
||||
}
|
||||
return Labels{{Name: "__name__", Value: lbl.String()}}
|
||||
return FromStrings("__name__", lbl.String())
|
||||
}(),
|
||||
},
|
||||
} {
|
||||
|
@ -778,3 +606,52 @@ func BenchmarkLabels_Hash(b *testing.B) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshaling(t *testing.T) {
|
||||
lbls := FromStrings("aaa", "111", "bbb", "2222", "ccc", "33333")
|
||||
expectedJSON := "{\"aaa\":\"111\",\"bbb\":\"2222\",\"ccc\":\"33333\"}"
|
||||
b, err := json.Marshal(lbls)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedJSON, string(b))
|
||||
|
||||
var gotJ Labels
|
||||
err = json.Unmarshal(b, &gotJ)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lbls, gotJ)
|
||||
|
||||
expectedYAML := "aaa: \"111\"\nbbb: \"2222\"\nccc: \"33333\"\n"
|
||||
b, err = yaml.Marshal(lbls)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedYAML, string(b))
|
||||
|
||||
var gotY Labels
|
||||
err = yaml.Unmarshal(b, &gotY)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lbls, gotY)
|
||||
|
||||
// Now in a struct with a tag
|
||||
type foo struct {
|
||||
ALabels Labels `json:"a_labels,omitempty" yaml:"a_labels,omitempty"`
|
||||
}
|
||||
|
||||
f := foo{ALabels: lbls}
|
||||
b, err = json.Marshal(f)
|
||||
require.NoError(t, err)
|
||||
expectedJSONFromStruct := "{\"a_labels\":" + expectedJSON + "}"
|
||||
require.Equal(t, expectedJSONFromStruct, string(b))
|
||||
|
||||
var gotFJ foo
|
||||
err = json.Unmarshal(b, &gotFJ)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, f, gotFJ)
|
||||
|
||||
b, err = yaml.Marshal(f)
|
||||
require.NoError(t, err)
|
||||
expectedYAMLFromStruct := "a_labels:\n aaa: \"111\"\n bbb: \"2222\"\n ccc: \"33333\"\n"
|
||||
require.Equal(t, expectedYAMLFromStruct, string(b))
|
||||
|
||||
var gotFY foo
|
||||
err = yaml.Unmarshal(b, &gotFY)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, f, gotFY)
|
||||
}
|
||||
|
|
23
model/metadata/metadata.go
Normal file
|
@ -0,0 +1,23 @@
|
|||
// Copyright 2022 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metadata
|
||||
|
||||
import "github.com/prometheus/prometheus/model/textparse"
|
||||
|
||||
// Metadata stores a series' metadata information.
|
||||
type Metadata struct {
|
||||
Type textparse.MetricType
|
||||
Unit string
|
||||
Help string
|
||||
}
|
|
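A hypothetical use of the new metadata package; the struct fields come straight from the file above, while textparse.MetricTypeCounter is assumed to be the existing constant for counter metrics.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/metadata"
	"github.com/prometheus/prometheus/model/textparse"
)

func main() {
	// Per-series metadata as modelled by the new struct above.
	m := metadata.Metadata{
		Type: textparse.MetricTypeCounter, // assumed constant from the textparse package
		Unit: "seconds",
		Help: "Total time spent serving requests.",
	}
	fmt.Printf("type=%s unit=%s help=%q\n", m.Type, m.Unit, m.Help)
}
```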
@ -192,24 +192,29 @@ func (re Regexp) String() string {
|
|||
// are applied in order of input.
|
||||
// If a label set is dropped, nil is returned.
|
||||
// May return the input labelSet modified.
|
||||
func Process(labels labels.Labels, cfgs ...*Config) labels.Labels {
|
||||
func Process(lbls labels.Labels, cfgs ...*Config) labels.Labels {
|
||||
lb := labels.NewBuilder(nil)
|
||||
for _, cfg := range cfgs {
|
||||
labels = relabel(labels, cfg)
|
||||
if labels == nil {
|
||||
lbls = relabel(lbls, cfg, lb)
|
||||
if lbls == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return labels
|
||||
return lbls
|
||||
}
|
||||
|
||||
func relabel(lset labels.Labels, cfg *Config) labels.Labels {
|
||||
values := make([]string, 0, len(cfg.SourceLabels))
|
||||
func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) labels.Labels {
|
||||
var va [16]string
|
||||
values := va[:0]
|
||||
if len(cfg.SourceLabels) > cap(values) {
|
||||
values = make([]string, 0, len(cfg.SourceLabels))
|
||||
}
|
||||
for _, ln := range cfg.SourceLabels {
|
||||
values = append(values, lset.Get(string(ln)))
|
||||
}
|
||||
val := strings.Join(values, cfg.Separator)
|
||||
|
||||
lb := labels.NewBuilder(lset)
|
||||
lb.Reset(lset)
|
||||
|
||||
switch cfg.Action {
|
||||
case Drop:
|
||||
|
@ -267,7 +272,7 @@ func relabel(lset labels.Labels, cfg *Config) labels.Labels {
|
|||
panic(fmt.Errorf("relabel: unknown relabel action type %q", cfg.Action))
|
||||
}
|
||||
|
||||
return lb.Labels()
|
||||
return lb.Labels(lset)
|
||||
}
|
||||
|
||||
// sum64 sums the md5 hash to an uint64.
|
||||
|
|
|
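For orientation, a sketch (not from the commit) of the public entry point whose internals the diff above optimizes. The rule below is made up for illustration; it is parsed from YAML exactly as the benchmark that follows does, and Process returns nil when a rule drops the label set (and may modify its input in place).

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"
)

func main() {
	// A single rule; action defaults to 'replace'.
	rule := `
- source_labels: [job]
  target_label: team
  replacement: platform
`
	var cfgs []*relabel.Config
	if err := yaml.UnmarshalStrict([]byte(rule), &cfgs); err != nil {
		panic(err)
	}

	lbls := labels.FromStrings("instance", "0", "job", "app-server")
	out := relabel.Process(lbls, cfgs...)
	if out == nil {
		fmt.Println("series dropped")
		return
	}
	fmt.Println(out) // team="platform" is added alongside the original labels
}
```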
@ -18,6 +18,7 @@ import (
|
|||
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/stretchr/testify/require"
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
)
|
||||
|
@ -500,3 +501,160 @@ func TestTargetLabelValidity(t *testing.T) {
|
|||
"Expected %q to be %v", test.str, test.valid)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRelabel(b *testing.B) {
|
||||
tests := []struct {
|
||||
name string
|
||||
lbls labels.Labels
|
||||
config string
|
||||
cfgs []*Config
|
||||
}{
|
||||
{
|
||||
name: "example", // From prometheus/config/testdata/conf.good.yml.
|
||||
config: `
|
||||
- source_labels: [job, __meta_dns_name]
|
||||
regex: "(.*)some-[regex]"
|
||||
target_label: job
|
||||
replacement: foo-${1}
|
||||
# action defaults to 'replace'
|
||||
- source_labels: [abc]
|
||||
target_label: cde
|
||||
- replacement: static
|
||||
target_label: abc
|
||||
- regex:
|
||||
replacement: static
|
||||
target_label: abc`,
|
||||
lbls: labels.FromStrings("__meta_dns_name", "example-some-x.com", "abc", "def", "job", "foo"),
|
||||
},
|
||||
{
|
||||
name: "kubernetes",
|
||||
config: `
|
||||
- source_labels:
|
||||
- __meta_kubernetes_pod_container_port_name
|
||||
regex: .*-metrics
|
||||
action: keep
|
||||
- source_labels:
|
||||
- __meta_kubernetes_pod_label_name
|
||||
action: drop
|
||||
regex: ""
|
||||
- source_labels:
|
||||
- __meta_kubernetes_pod_phase
|
||||
regex: Succeeded|Failed
|
||||
action: drop
|
||||
- source_labels:
|
||||
- __meta_kubernetes_pod_annotation_prometheus_io_scrape
|
||||
regex: "false"
|
||||
action: drop
|
||||
- source_labels:
|
||||
- __meta_kubernetes_pod_annotation_prometheus_io_scheme
|
||||
target_label: __scheme__
|
||||
regex: (https?)
|
||||
replacement: $1
|
||||
action: replace
|
||||
- source_labels:
|
||||
- __meta_kubernetes_pod_annotation_prometheus_io_path
|
||||
target_label: __metrics_path__
|
||||
regex: (.+)
|
||||
replacement: $1
|
||||
action: replace
|
||||
- source_labels:
|
||||
- __address__
|
||||
- __meta_kubernetes_pod_annotation_prometheus_io_port
|
||||
target_label: __address__
|
||||
regex: (.+?)(\:\d+)?;(\d+)
|
||||
replacement: $1:$3
|
||||
action: replace
|
||||
- regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+)
|
||||
replacement: __param_$1
|
||||
action: labelmap
|
||||
- regex: __meta_kubernetes_pod_label_prometheus_io_label_(.+)
|
||||
action: labelmap
|
||||
- regex: __meta_kubernetes_pod_annotation_prometheus_io_label_(.+)
|
||||
action: labelmap
|
||||
- source_labels:
|
||||
- __meta_kubernetes_namespace
|
||||
- __meta_kubernetes_pod_label_name
|
||||
separator: /
|
||||
target_label: job
|
||||
replacement: $1
|
||||
action: replace
|
||||
- source_labels:
|
||||
- __meta_kubernetes_namespace
|
||||
target_label: namespace
|
||||
action: replace
|
||||
- source_labels:
|
||||
- __meta_kubernetes_pod_name
|
||||
target_label: pod
|
||||
action: replace
|
||||
- source_labels:
|
||||
- __meta_kubernetes_pod_container_name
|
||||
target_label: container
|
||||
action: replace
|
||||
- source_labels:
|
||||
- __meta_kubernetes_pod_name
|
||||
- __meta_kubernetes_pod_container_name
|
||||
- __meta_kubernetes_pod_container_port_name
|
||||
separator: ':'
|
||||
target_label: instance
|
||||
action: replace
|
||||
- target_label: cluster
|
||||
replacement: dev-us-central-0
|
||||
- source_labels:
|
||||
- __meta_kubernetes_namespace
|
||||
regex: hosted-grafana
|
||||
action: drop
|
||||
- source_labels:
|
||||
- __address__
|
||||
target_label: __tmp_hash
|
||||
modulus: 3
|
||||
action: hashmod
|
||||
- source_labels:
|
||||
- __tmp_hash
|
||||
regex: ^0$
|
||||
action: keep
|
||||
- regex: __tmp_hash
|
||||
action: labeldrop`,
|
||||
lbls: labels.FromStrings(
|
||||
"__address__", "10.132.183.40:80",
|
||||
"__meta_kubernetes_namespace", "loki-boltdb-shipper",
|
||||
"__meta_kubernetes_pod_annotation_promtail_loki_boltdb_shipper_hash", "50523b9759094a144adcec2eae0aa4ad",
|
||||
"__meta_kubernetes_pod_annotationpresent_promtail_loki_boltdb_shipper_hash", "true",
|
||||
"__meta_kubernetes_pod_container_init", "false",
|
||||
"__meta_kubernetes_pod_container_name", "promtail",
|
||||
"__meta_kubernetes_pod_container_port_name", "http-metrics",
|
||||
"__meta_kubernetes_pod_container_port_number", "80",
|
||||
"__meta_kubernetes_pod_container_port_protocol", "TCP",
|
||||
"__meta_kubernetes_pod_controller_kind", "DaemonSet",
|
||||
"__meta_kubernetes_pod_controller_name", "promtail-loki-boltdb-shipper",
|
||||
"__meta_kubernetes_pod_host_ip", "10.128.0.178",
|
||||
"__meta_kubernetes_pod_ip", "10.132.183.40",
|
||||
"__meta_kubernetes_pod_label_controller_revision_hash", "555b77cd7d",
|
||||
"__meta_kubernetes_pod_label_name", "promtail-loki-boltdb-shipper",
|
||||
"__meta_kubernetes_pod_label_pod_template_generation", "45",
|
||||
"__meta_kubernetes_pod_labelpresent_controller_revision_hash", "true",
|
||||
"__meta_kubernetes_pod_labelpresent_name", "true",
|
||||
"__meta_kubernetes_pod_labelpresent_pod_template_generation", "true",
|
||||
"__meta_kubernetes_pod_name", "promtail-loki-boltdb-shipper-jgtr7",
|
||||
"__meta_kubernetes_pod_node_name", "gke-dev-us-central-0-main-n2s8-2-14d53341-9hkr",
|
||||
"__meta_kubernetes_pod_phase", "Running",
|
||||
"__meta_kubernetes_pod_ready", "true",
|
||||
"__meta_kubernetes_pod_uid", "4c586419-7f6c-448d-aeec-ca4fa5b05e60",
|
||||
"__metrics_path__", "/metrics",
|
||||
"__scheme__", "http",
|
||||
"__scrape_interval__", "15s",
|
||||
"__scrape_timeout__", "10s",
|
||||
"job", "kubernetes-pods"),
|
||||
},
|
||||
}
|
||||
for i := range tests {
|
||||
err := yaml.UnmarshalStrict([]byte(tests[i].config), &tests[i].cfgs)
|
||||
require.NoError(b, err)
|
||||
}
|
||||
for _, tt := range tests {
|
||||
b.Run(tt.name, func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = Process(tt.lbls, tt.cfgs...)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -24,7 +24,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
yaml "gopkg.in/yaml.v3"
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"github.com/prometheus/prometheus/model/timestamp"
|
||||
"github.com/prometheus/prometheus/promql/parser"
|
||||
|
|
|
@ -597,7 +597,7 @@ func TestOMNullByteHandling(t *testing.T) {
|
|||
},
|
||||
{
|
||||
input: "a{b\x00=\"hiih\"} 1",
|
||||
err: "expected equal, got \"INVALID\"",
|
||||
err: "expected equal, got \"INVALID\"",
|
||||
},
|
||||
{
|
||||
input: "a\x00{b=\"ddd\"} 1",
|
||||
|
|
|
@ -322,7 +322,7 @@ func TestPromNullByteHandling(t *testing.T) {
|
|||
},
|
||||
{
|
||||
input: "a{b\x00=\"hiih\"} 1",
|
||||
err: "expected equal, got \"INVALID\"",
|
||||
err: "expected equal, got \"INVALID\"",
|
||||
},
|
||||
{
|
||||
input: "a\x00{b=\"ddd\"} 1",
|
||||
|
@ -367,7 +367,7 @@ func BenchmarkParse(b *testing.B) {
|
|||
b.Run(parserName+"/no-decode-metric/"+fn, func(b *testing.B) {
|
||||
total := 0
|
||||
|
||||
b.SetBytes(int64(len(buf) * (b.N / promtestdataSampleCount)))
|
||||
b.SetBytes(int64(len(buf) / promtestdataSampleCount))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
|
@ -395,7 +395,7 @@ func BenchmarkParse(b *testing.B) {
|
|||
b.Run(parserName+"/decode-metric/"+fn, func(b *testing.B) {
|
||||
total := 0
|
||||
|
||||
b.SetBytes(int64(len(buf) * (b.N / promtestdataSampleCount)))
|
||||
b.SetBytes(int64(len(buf) / promtestdataSampleCount))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
|
@ -428,7 +428,7 @@ func BenchmarkParse(b *testing.B) {
|
|||
total := 0
|
||||
res := make(labels.Labels, 0, 5)
|
||||
|
||||
b.SetBytes(int64(len(buf) * (b.N / promtestdataSampleCount)))
|
||||
b.SetBytes(int64(len(buf) / promtestdataSampleCount))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
|
@ -458,7 +458,10 @@ func BenchmarkParse(b *testing.B) {
|
|||
_ = total
|
||||
})
|
||||
b.Run("expfmt-text/"+fn, func(b *testing.B) {
|
||||
b.SetBytes(int64(len(buf) * (b.N / promtestdataSampleCount)))
|
||||
if parserName != "prometheus" {
|
||||
b.Skip()
|
||||
}
|
||||
b.SetBytes(int64(len(buf) / promtestdataSampleCount))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
|
@ -507,7 +510,7 @@ func BenchmarkGzip(b *testing.B) {
|
|||
k := b.N / promtestdataSampleCount
|
||||
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(k) * int64(n))
|
||||
b.SetBytes(int64(n) / promtestdataSampleCount)
|
||||
b.ResetTimer()
|
||||
|
||||
total := 0
|
||||
|
|
|
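The SetBytes fixes above drop the b.N factor because testing.B.SetBytes expects the number of bytes processed per iteration; the test harness multiplies by b.N itself when reporting MB/s. A minimal, self-contained illustration, unrelated to the Prometheus parsers:

```go
package example

import "testing"

// BenchmarkCopy reports throughput correctly: SetBytes takes the bytes
// handled by one loop iteration, not the total across all b.N iterations.
func BenchmarkCopy(b *testing.B) {
	src := make([]byte, 64*1024)
	dst := make([]byte, 64*1024)
	b.SetBytes(int64(len(src))) // per-iteration byte count
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		copy(dst, src)
	}
}
```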
@ -361,7 +361,7 @@ func (n *Manager) Send(alerts ...*Alert) {
|
|||
}
|
||||
}
|
||||
|
||||
a.Labels = lb.Labels()
|
||||
a.Labels = lb.Labels(a.Labels)
|
||||
}
|
||||
|
||||
alerts = n.relabelAlerts(alerts)
|
||||
|
|
|
@ -31,7 +31,7 @@ import (
|
|||
"github.com/prometheus/common/model"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/atomic"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/prometheus/prometheus/config"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
|
@ -243,7 +243,7 @@ func TestCustomDo(t *testing.T) {
|
|||
func TestExternalLabels(t *testing.T) {
|
||||
h := NewManager(&Options{
|
||||
QueueCapacity: 3 * maxBatchSize,
|
||||
ExternalLabels: labels.Labels{{Name: "a", Value: "b"}},
|
||||
ExternalLabels: labels.FromStrings("a", "b"),
|
||||
RelabelConfigs: []*relabel.Config{
|
||||
{
|
||||
SourceLabels: model.LabelNames{"alertname"},
|
||||
|
@ -570,7 +570,7 @@ func makeInputTargetGroup() *targetgroup.Group {
|
|||
}
|
||||
|
||||
func TestLabelsToOpenAPILabelSet(t *testing.T) {
|
||||
require.Equal(t, models.LabelSet{"aaa": "111", "bbb": "222"}, labelsToOpenAPILabelSet(labels.Labels{{Name: "aaa", Value: "111"}, {Name: "bbb", Value: "222"}}))
|
||||
require.Equal(t, models.LabelSet{"aaa": "111", "bbb": "222"}, labelsToOpenAPILabelSet(labels.FromStrings("aaa", "111", "bbb", "222")))
|
||||
}
|
||||
|
||||
// TestHangingNotifier validates that targets updates happen even when there are
|
||||
|
|
|
@ -35,6 +35,7 @@ import (
|
|||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/model/timestamp"
|
||||
|
@ -128,6 +129,8 @@ type Query interface {
|
|||
type QueryOpts struct {
|
||||
// Enables recording per-step statistics if the engine has it enabled as well. Disabled by default.
|
||||
EnablePerStepStats bool
|
||||
// Lookback delta duration for this query.
|
||||
LookbackDelta time.Duration
|
||||
}
|
||||
|
||||
// query implements the Query interface.
|
||||
|
@ -437,11 +440,17 @@ func (ng *Engine) newQuery(q storage.Queryable, opts *QueryOpts, expr parser.Exp
|
|||
opts = &QueryOpts{}
|
||||
}
|
||||
|
||||
lookbackDelta := opts.LookbackDelta
|
||||
if lookbackDelta <= 0 {
|
||||
lookbackDelta = ng.lookbackDelta
|
||||
}
|
||||
|
||||
es := &parser.EvalStmt{
|
||||
Expr: PreprocessExpr(expr, start, end),
|
||||
Start: start,
|
||||
End: end,
|
||||
Interval: interval,
|
||||
Expr: PreprocessExpr(expr, start, end),
|
||||
Start: start,
|
||||
End: end,
|
||||
Interval: interval,
|
||||
LookbackDelta: lookbackDelta,
|
||||
}
|
||||
qry := &query{
|
||||
stmt: es,
|
||||
|
@ -636,7 +645,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
|
|||
ctx: ctxInnerEval,
|
||||
maxSamples: ng.maxSamplesPerQuery,
|
||||
logger: ng.logger,
|
||||
lookbackDelta: ng.lookbackDelta,
|
||||
lookbackDelta: s.LookbackDelta,
|
||||
samplesStats: query.sampleStats,
|
||||
noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn,
|
||||
}
|
||||
|
@ -688,7 +697,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
|
|||
ctx: ctxInnerEval,
|
||||
maxSamples: ng.maxSamplesPerQuery,
|
||||
logger: ng.logger,
|
||||
lookbackDelta: ng.lookbackDelta,
|
||||
lookbackDelta: s.LookbackDelta,
|
||||
samplesStats: query.sampleStats,
|
||||
noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn,
|
||||
}
|
||||
|
@ -801,7 +810,7 @@ func (ng *Engine) getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorS
|
|||
}
|
||||
|
||||
if evalRange == 0 {
|
||||
start = start - durationMilliseconds(ng.lookbackDelta)
|
||||
start = start - durationMilliseconds(s.LookbackDelta)
|
||||
} else {
|
||||
// For all matrix queries we want to ensure that we have (end-start) + range selected
|
||||
// this way we have `range` data before the start time
|
||||
|
@ -815,6 +824,20 @@ func (ng *Engine) getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorS
|
|||
return start, end
|
||||
}
|
||||
|
||||
func (ng *Engine) getLastSubqueryInterval(path []parser.Node) time.Duration {
|
||||
var interval time.Duration
|
||||
for _, node := range path {
|
||||
switch n := node.(type) {
|
||||
case *parser.SubqueryExpr:
|
||||
interval = n.Step
|
||||
if n.Step == 0 {
|
||||
interval = time.Duration(ng.noStepSubqueryIntervalFn(durationMilliseconds(n.Range))) * time.Millisecond
|
||||
}
|
||||
}
|
||||
}
|
||||
return interval
|
||||
}
|
||||
|
||||
func (ng *Engine) populateSeries(querier storage.Querier, s *parser.EvalStmt) {
|
||||
// Whenever a MatrixSelector is evaluated, evalRange is set to the corresponding range.
|
||||
// The evaluation of the VectorSelector inside then evaluates the given range and unsets
|
||||
|
@ -825,10 +848,14 @@ func (ng *Engine) populateSeries(querier storage.Querier, s *parser.EvalStmt) {
|
|||
switch n := node.(type) {
|
||||
case *parser.VectorSelector:
|
||||
start, end := ng.getTimeRangesForSelector(s, n, path, evalRange)
|
||||
interval := ng.getLastSubqueryInterval(path)
|
||||
if interval == 0 {
|
||||
interval = s.Interval
|
||||
}
|
||||
hints := &storage.SelectHints{
|
||||
Start: start,
|
||||
End: end,
|
||||
Step: durationMilliseconds(s.Interval),
|
||||
Step: durationMilliseconds(interval),
|
||||
Range: durationMilliseconds(evalRange),
|
||||
Func: extractFuncFromPath(path),
|
||||
}
|
||||
|
@ -937,7 +964,7 @@ func (ev *evaluator) error(err error) {
|
|||
}
|
||||
|
||||
// recover is the handler that turns panics into returns from the top level of evaluation.
|
||||
func (ev *evaluator) recover(ws *storage.Warnings, errp *error) {
|
||||
func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error) {
|
||||
e := recover()
|
||||
if e == nil {
|
||||
return
|
||||
|
@ -949,7 +976,7 @@ func (ev *evaluator) recover(ws *storage.Warnings, errp *error) {
|
|||
buf := make([]byte, 64<<10)
|
||||
buf = buf[:runtime.Stack(buf, false)]
|
||||
|
||||
level.Error(ev.logger).Log("msg", "runtime panic in parser", "err", e, "stacktrace", string(buf))
|
||||
level.Error(ev.logger).Log("msg", "runtime panic in parser", "expr", expr.String(), "err", e, "stacktrace", string(buf))
|
||||
*errp = fmt.Errorf("unexpected error: %w", err)
|
||||
case errWithWarnings:
|
||||
*errp = err.err
|
||||
|
@ -960,7 +987,7 @@ func (ev *evaluator) recover(ws *storage.Warnings, errp *error) {
|
|||
}
|
||||
|
||||
func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws storage.Warnings, err error) {
|
||||
defer ev.recover(&ws, &err)
|
||||
defer ev.recover(expr, &ws, &err)
|
||||
|
||||
v, ws = ev.eval(expr)
|
||||
return v, ws, nil
|
||||
|
@ -1235,7 +1262,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
|
|||
case *parser.AggregateExpr:
|
||||
// Grouping labels must be sorted (expected both by generateGroupingKey() and aggregation()).
|
||||
sortedGrouping := e.Grouping
|
||||
sort.Strings(sortedGrouping)
|
||||
slices.Sort(sortedGrouping)
|
||||
|
||||
// Prepare a function to initialise series helpers with the grouping key.
|
||||
buf := make([]byte, 0, 1024)
|
||||
|
@ -2051,13 +2078,13 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
|
|||
|
||||
func signatureFunc(on bool, b []byte, names ...string) func(labels.Labels) string {
|
||||
if on {
|
||||
sort.Strings(names)
|
||||
slices.Sort(names)
|
||||
return func(lset labels.Labels) string {
|
||||
return string(lset.BytesWithLabels(b, names...))
|
||||
}
|
||||
}
|
||||
names = append([]string{labels.MetricName}, names...)
|
||||
sort.Strings(names)
|
||||
slices.Sort(names)
|
||||
return func(lset labels.Labels) string {
|
||||
return string(lset.BytesWithoutLabels(b, names...))
|
||||
}
|
||||
|
@ -2108,7 +2135,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V
|
|||
}
|
||||
}
|
||||
|
||||
ret := enh.lb.Labels()
|
||||
ret := enh.lb.Labels(nil)
|
||||
enh.resultMetric[str] = ret
|
||||
return ret
|
||||
}
|
||||
|
@ -2148,7 +2175,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala
|
|||
}
|
||||
|
||||
func dropMetricName(l labels.Labels) labels.Labels {
|
||||
return labels.NewBuilder(l).Del(labels.MetricName).Labels()
|
||||
return labels.NewBuilder(l).Del(labels.MetricName).Labels(nil)
|
||||
}
|
||||
|
||||
// scalarBinop evaluates a binary operation between two Scalars.
|
||||
|
@ -2259,7 +2286,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
|
|||
// operator is less frequently used than other aggregations, we're fine having to
|
||||
// re-compute the grouping key on each step for this case.
|
||||
grouping = append(grouping, valueLabel)
|
||||
sort.Strings(grouping)
|
||||
slices.Sort(grouping)
|
||||
recomputeGroupingKey = true
|
||||
}
|
||||
}
|
||||
|
@ -2272,7 +2299,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
|
|||
if op == parser.COUNT_VALUES {
|
||||
lb.Reset(metric)
|
||||
lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64))
|
||||
metric = lb.Labels()
|
||||
metric = lb.Labels(nil)
|
||||
|
||||
// We've changed the metric so we have to recompute the grouping key.
|
||||
recomputeGroupingKey = true
|
||||
|
@ -2296,7 +2323,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
|
|||
} else {
|
||||
lb.Keep(grouping...)
|
||||
}
|
||||
m := lb.Labels()
|
||||
m := lb.Labels(nil)
|
||||
newAgg := &groupedAggregation{
|
||||
labels: m,
|
||||
value: s.V,
|
||||
|
|
|
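A sketch of how the new per-query LookbackDelta in QueryOpts might be used. It assumes the engine's exported NewInstantQuery accepts the same *QueryOpts argument that newQuery receives in the diff above; the exact public signature is not shown in this commit.

```go
package example

import (
	"context"
	"time"

	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/storage"
)

// instantWithLookback issues an instant query whose lookback window differs
// from the engine-wide default, using the new QueryOpts.LookbackDelta field.
func instantWithLookback(ctx context.Context, ng *promql.Engine, q storage.Queryable, expr string) (*promql.Result, error) {
	opts := &promql.QueryOpts{
		EnablePerStepStats: true,             // honoured only if the engine enables stats too
		LookbackDelta:      10 * time.Minute, // flows into EvalStmt.LookbackDelta per the diff
	}
	qry, err := ng.NewInstantQuery(q, opts, expr, time.Now()) // assumed signature
	if err != nil {
		return nil, err
	}
	defer qry.Close()
	return qry.Exec(ctx), nil
}
```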
@ -330,56 +330,56 @@ func TestSelectHintsSetCorrectly(t *testing.T) {
|
|||
}, {
|
||||
query: "foo[2m:1s]", start: 300000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 175000, End: 300000},
|
||||
{Start: 175000, End: 300000, Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "count_over_time(foo[2m:1s])", start: 300000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 175000, End: 300000, Func: "count_over_time"},
|
||||
{Start: 175000, End: 300000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "count_over_time(foo[2m:1s] @ 300)", start: 200000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 175000, End: 300000, Func: "count_over_time"},
|
||||
{Start: 175000, End: 300000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "count_over_time(foo[2m:1s] @ 200)", start: 200000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 75000, End: 200000, Func: "count_over_time"},
|
||||
{Start: 75000, End: 200000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "count_over_time(foo[2m:1s] @ 100)", start: 200000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: -25000, End: 100000, Func: "count_over_time"},
|
||||
{Start: -25000, End: 100000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 165000, End: 290000, Func: "count_over_time"},
|
||||
{Start: 165000, End: 290000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 155000, End: 280000, Func: "count_over_time"},
|
||||
{Start: 155000, End: 280000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
// When the @ is on the vector selector, the enclosing subquery parameters
|
||||
// don't affect the hint ranges.
|
||||
query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 185000, End: 190000, Func: "count_over_time"},
|
||||
{Start: 185000, End: 190000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
// When the @ is on the vector selector, the enclosing subquery parameters
|
||||
// don't affect the hint ranges.
|
||||
query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 185000, End: 190000, Func: "count_over_time"},
|
||||
{Start: 185000, End: 190000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: -45000, End: 80000, Func: "count_over_time"},
|
||||
{Start: -45000, End: 80000, Func: "count_over_time", Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "foo", start: 10000, end: 20000,
|
||||
|
@ -498,13 +498,13 @@ func TestSelectHintsSetCorrectly(t *testing.T) {
|
|||
}, {
|
||||
query: "(max by (dim1) (foo))[5s:1s]", start: 10000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 0, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}},
|
||||
{Start: 0, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}, Step: 1000},
|
||||
},
|
||||
}, {
|
||||
query: "(sum(http_requests{group=~\"p.*\"})+max(http_requests{group=~\"c.*\"}))[20s:5s]", start: 120000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 95000, End: 120000, Func: "sum", By: true},
|
||||
{Start: 95000, End: 120000, Func: "max", By: true},
|
||||
{Start: 95000, End: 120000, Func: "sum", By: true, Step: 5000},
|
||||
{Start: 95000, End: 120000, Func: "max", By: true, Step: 5000},
|
||||
},
|
||||
}, {
|
||||
query: "foo @ 50 + bar @ 250 + baz @ 900", start: 100000, end: 500000,
|
||||
|
@ -544,12 +544,12 @@ func TestSelectHintsSetCorrectly(t *testing.T) {
|
|||
}, { // Hints are based on the inner most subquery timestamp.
|
||||
query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 50)[3s:1s] @ 3000`, start: 100000,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: -150000, End: 50000, Range: 100000, Func: "sum_over_time"},
|
||||
{Start: -150000, End: 50000, Range: 100000, Func: "sum_over_time", Step: 25000},
|
||||
},
|
||||
}, { // Hints are based on the inner most subquery timestamp.
|
||||
query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 3000)[3s:1s] @ 50`,
|
||||
expected: []*storage.SelectHints{
|
||||
{Start: 2800000, End: 3000000, Range: 100000, Func: "sum_over_time"},
|
||||
{Start: 2800000, End: 3000000, Range: 100000, Func: "sum_over_time", Step: 25000},
|
||||
},
|
||||
},
|
||||
} {
|
||||
|
@ -1439,7 +1439,7 @@ load 1ms
|
|||
ref, err := app.Append(0, lblsneg, -1000000, 1000)
|
||||
require.NoError(t, err)
|
||||
for ts := int64(-1000000 + 1000); ts <= 0; ts += 1000 {
|
||||
_, err := app.Append(ref, nil, ts, -float64(ts/1000)+1)
|
||||
_, err := app.Append(ref, labels.EmptyLabels(), ts, -float64(ts/1000)+1)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
|
@ -1608,7 +1608,7 @@ load 1ms
|
|||
{V: 3600, T: 6 * 60 * 1000},
|
||||
{V: 3600, T: 7 * 60 * 1000},
|
||||
},
|
||||
Metric: labels.Labels{},
|
||||
Metric: labels.EmptyLabels(),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -1641,17 +1641,27 @@ load 1ms
|
|||
}
|
||||
|
||||
func TestRecoverEvaluatorRuntime(t *testing.T) {
|
||||
ev := &evaluator{logger: log.NewNopLogger()}
|
||||
var output []interface{}
|
||||
logger := log.Logger(log.LoggerFunc(func(keyvals ...interface{}) error {
|
||||
output = append(output, keyvals...)
|
||||
return nil
|
||||
}))
|
||||
ev := &evaluator{logger: logger}
|
||||
|
||||
expr, _ := parser.ParseExpr("sum(up)")
|
||||
|
||||
var err error
|
||||
defer ev.recover(nil, &err)
|
||||
|
||||
defer func() {
|
||||
require.EqualError(t, err, "unexpected error: runtime error: index out of range [123] with length 0")
|
||||
require.Contains(t, output, "sum(up)")
|
||||
}()
|
||||
defer ev.recover(expr, nil, &err)
|
||||
|
||||
// Cause a runtime panic.
|
||||
var a []int
|
||||
//nolint:govet
|
||||
a[123] = 1
|
||||
|
||||
require.EqualError(t, err, "unexpected error")
|
||||
}
|
||||
|
||||
func TestRecoverEvaluatorError(t *testing.T) {
|
||||
|
@ -1663,7 +1673,7 @@ func TestRecoverEvaluatorError(t *testing.T) {
|
|||
defer func() {
|
||||
require.EqualError(t, err, e.Error())
|
||||
}()
|
||||
defer ev.recover(nil, &err)
|
||||
defer ev.recover(nil, nil, &err)
|
||||
|
||||
panic(e)
|
||||
}
|
||||
|
@ -1683,7 +1693,7 @@ func TestRecoverEvaluatorErrorWithWarnings(t *testing.T) {
|
|||
require.EqualError(t, err, e.Error())
|
||||
require.Equal(t, warnings, ws, "wrong warning message")
|
||||
}()
|
||||
defer ev.recover(&ws, &err)
|
||||
defer ev.recover(nil, &ws, &err)
|
||||
|
||||
panic(e)
|
||||
}
|
||||
|
@ -1899,7 +1909,7 @@ func TestSubquerySelector(t *testing.T) {
|
|||
Matrix{
|
||||
Series{
|
||||
Points: []Point{{V: 270, T: 90000}, {V: 300, T: 100000}, {V: 330, T: 110000}, {V: 360, T: 120000}},
|
||||
Metric: labels.Labels{},
|
||||
Metric: labels.EmptyLabels(),
|
||||
},
|
||||
},
|
||||
nil,
|
||||
|
@ -1913,7 +1923,7 @@ func TestSubquerySelector(t *testing.T) {
|
|||
Matrix{
|
||||
Series{
|
||||
Points: []Point{{V: 800, T: 80000}, {V: 900, T: 90000}, {V: 1000, T: 100000}, {V: 1100, T: 110000}, {V: 1200, T: 120000}},
|
||||
Metric: labels.Labels{},
|
||||
Metric: labels.EmptyLabels(),
|
||||
},
|
||||
},
|
||||
nil,
|
||||
|
@ -1927,7 +1937,7 @@ func TestSubquerySelector(t *testing.T) {
|
|||
Matrix{
|
||||
Series{
|
||||
Points: []Point{{V: 1000, T: 100000}, {V: 1000, T: 105000}, {V: 1100, T: 110000}, {V: 1100, T: 115000}, {V: 1200, T: 120000}},
|
||||
Metric: labels.Labels{},
|
||||
Metric: labels.EmptyLabels(),
|
||||
},
|
||||
},
|
||||
nil,
|
||||
|
@ -2983,7 +2993,7 @@ func TestRangeQuery(t *testing.T) {
|
|||
Result: Matrix{
|
||||
Series{
|
||||
Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}},
|
||||
Metric: labels.Labels{},
|
||||
Metric: labels.EmptyLabels(),
|
||||
},
|
||||
},
|
||||
Start: time.Unix(0, 0),
|
||||
|
@ -2998,7 +3008,7 @@ func TestRangeQuery(t *testing.T) {
|
|||
Result: Matrix{
|
||||
Series{
|
||||
Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}},
|
||||
Metric: labels.Labels{},
|
||||
Metric: labels.EmptyLabels(),
|
||||
},
|
||||
},
|
||||
Start: time.Unix(0, 0),
|
||||
|
@ -3013,7 +3023,7 @@ func TestRangeQuery(t *testing.T) {
|
|||
Result: Matrix{
|
||||
Series{
|
||||
Points: []Point{{V: 0, T: 0}, {V: 11, T: 60000}, {V: 1100, T: 120000}, {V: 110000, T: 180000}, {V: 11000000, T: 240000}},
|
||||
Metric: labels.Labels{},
|
||||
Metric: labels.EmptyLabels(),
|
||||
},
|
||||
},
|
||||
Start: time.Unix(0, 0),
|
||||
|
@ -3028,7 +3038,7 @@ func TestRangeQuery(t *testing.T) {
|
|||
Result: Matrix{
|
||||
Series{
|
||||
Points: []Point{{V: 5, T: 0}, {V: 59, T: 60000}, {V: 9, T: 120000}, {V: 956, T: 180000}},
|
||||
Metric: labels.Labels{},
|
||||
Metric: labels.EmptyLabels(),
|
||||
},
|
||||
},
|
||||
Start: time.Unix(0, 0),
|
||||
|
@ -3043,7 +3053,7 @@ func TestRangeQuery(t *testing.T) {
|
|||
Result: Matrix{
|
||||
Series{
|
||||
Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}},
|
||||
Metric: labels.Labels{labels.Label{Name: "__name__", Value: "metric"}},
|
||||
Metric: labels.FromStrings("__name__", "metric"),
|
||||
},
|
||||
},
|
||||
Start: time.Unix(0, 0),
|
||||
|
@ -3058,7 +3068,7 @@ func TestRangeQuery(t *testing.T) {
|
|||
Result: Matrix{
|
||||
Series{
|
||||
Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}},
|
||||
Metric: labels.Labels{labels.Label{Name: "__name__", Value: "metric"}},
|
||||
Metric: labels.FromStrings("__name__", "metric"),
|
||||
},
|
||||
},
|
||||
Start: time.Unix(0, 0),
|
||||
|
@ -3074,17 +3084,17 @@ func TestRangeQuery(t *testing.T) {
|
|||
Result: Matrix{
|
||||
Series{
|
||||
Points: []Point{{V: 1, T: 0}, {V: 3, T: 60000}, {V: 5, T: 120000}},
|
||||
Metric: labels.Labels{
|
||||
labels.Label{Name: "__name__", Value: "bar"},
|
||||
labels.Label{Name: "job", Value: "2"},
|
||||
},
|
||||
Metric: labels.FromStrings(
|
||||
"__name__", "bar",
|
||||
"job", "2",
|
||||
),
|
||||
},
|
||||
Series{
|
||||
Points: []Point{{V: 3, T: 60000}, {V: 5, T: 120000}},
|
||||
Metric: labels.Labels{
|
||||
labels.Label{Name: "__name__", Value: "foo"},
|
||||
labels.Label{Name: "job", Value: "1"},
|
||||
},
|
||||
Metric: labels.FromStrings(
|
||||
"__name__", "foo",
|
||||
"job", "1",
|
||||
),
|
||||
},
|
||||
},
|
||||
Start: time.Unix(0, 0),
|
||||
|
@ -3136,3 +3146,96 @@ func TestRangeQuery(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestQueryLookbackDelta(t *testing.T) {
|
||||
var (
|
||||
load = `load 5m
|
||||
metric 0 1 2
|
||||
`
|
||||
query = "metric"
|
||||
lastDatapointTs = time.Unix(600, 0)
|
||||
)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
ts time.Time
|
||||
engineLookback, queryLookback time.Duration
|
||||
expectSamples bool
|
||||
}{
|
||||
{
|
||||
name: "default lookback delta",
|
||||
ts: lastDatapointTs.Add(defaultLookbackDelta),
|
||||
expectSamples: true,
|
||||
},
|
||||
{
|
||||
name: "outside default lookback delta",
|
||||
ts: lastDatapointTs.Add(defaultLookbackDelta + time.Millisecond),
|
||||
expectSamples: false,
|
||||
},
|
||||
{
|
||||
name: "custom engine lookback delta",
|
||||
ts: lastDatapointTs.Add(10 * time.Minute),
|
||||
engineLookback: 10 * time.Minute,
|
||||
expectSamples: true,
|
||||
},
|
||||
{
|
||||
name: "outside custom engine lookback delta",
|
||||
ts: lastDatapointTs.Add(10*time.Minute + time.Millisecond),
|
||||
engineLookback: 10 * time.Minute,
|
||||
expectSamples: false,
|
||||
},
|
||||
{
|
||||
name: "custom query lookback delta",
|
||||
ts: lastDatapointTs.Add(20 * time.Minute),
|
||||
engineLookback: 10 * time.Minute,
|
||||
queryLookback: 20 * time.Minute,
|
||||
expectSamples: true,
|
||||
},
|
||||
{
|
||||
name: "outside custom query lookback delta",
|
||||
ts: lastDatapointTs.Add(20*time.Minute + time.Millisecond),
|
||||
engineLookback: 10 * time.Minute,
|
||||
queryLookback: 20 * time.Minute,
|
||||
expectSamples: false,
|
||||
},
|
||||
{
|
||||
name: "negative custom query lookback delta",
|
||||
ts: lastDatapointTs.Add(20 * time.Minute),
|
||||
engineLookback: -10 * time.Minute,
|
||||
queryLookback: 20 * time.Minute,
|
||||
expectSamples: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
c := c
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
test, err := NewTest(t, load)
|
||||
require.NoError(t, err)
|
||||
defer test.Close()
|
||||
|
||||
err = test.Run()
|
||||
require.NoError(t, err)
|
||||
|
||||
eng := test.QueryEngine()
|
||||
if c.engineLookback != 0 {
|
||||
eng.lookbackDelta = c.engineLookback
|
||||
}
|
||||
opts := &QueryOpts{
|
||||
LookbackDelta: c.queryLookback,
|
||||
}
|
||||
qry, err := eng.NewInstantQuery(test.Queryable(), opts, query, c.ts)
|
||||
require.NoError(t, err)
|
||||
|
||||
res := qry.Exec(test.Context())
|
||||
require.NoError(t, res.Err)
|
||||
vec, ok := res.Value.(Vector)
|
||||
require.True(t, ok)
|
||||
if c.expectSamples {
|
||||
require.NotEmpty(t, vec)
|
||||
} else {
|
||||
require.Empty(t, vec)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -31,17 +31,23 @@ import (
 // FunctionCall is the type of a PromQL function implementation
 //
 // vals is a list of the evaluated arguments for the function call.
-// For range vectors it will be a Matrix with one series, instant vectors a
-// Vector, scalars a Vector with one series whose value is the scalar
-// value,and nil for strings.
+//
+// For range vectors it will be a Matrix with one series, instant vectors a
+// Vector, scalars a Vector with one series whose value is the scalar
+// value,and nil for strings.
+//
 // args are the original arguments to the function, where you can access
-// matrixSelectors, vectorSelectors, and StringLiterals.
+// matrixSelectors, vectorSelectors, and StringLiterals.
+//
 // enh.Out is a pre-allocated empty vector that you may use to accumulate
-// output before returning it. The vectors in vals should not be returned.a
+// output before returning it. The vectors in vals should not be returned.a
+//
 // Range vector functions need only return a vector with the right value,
-// the metric and timestamp are not needed.
+// the metric and timestamp are not needed.
+//
 // Instant vector functions need only return a vector with the right values and
-// metrics, the timestamp are not needed.
+// metrics, the timestamp are not needed.
+//
 // Scalar results should be returned as the value of a sample in a Vector.
 type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector
 
@@ -813,7 +819,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
 		if !ok {
 			el.Metric = labels.NewBuilder(el.Metric).
 				Del(excludedLabels...).
-				Labels()
+				Labels(nil)
 
 			mb = &metricWithBuckets{el.Metric, nil}
 			enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb
@@ -912,7 +918,7 @@ func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNod
 		if len(res) > 0 {
 			lb.Set(dst, string(res))
 		}
-		outMetric = lb.Labels()
+		outMetric = lb.Labels(nil)
 		enh.Dmn[h] = outMetric
 	}
 }
@@ -980,7 +986,7 @@ func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe
 			lb.Set(dst, strval)
 		}
 
-		outMetric = lb.Labels()
+		outMetric = lb.Labels(nil)
 		enh.Dmn[h] = outMetric
 	}
 
@@ -1233,14 +1239,14 @@ func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels {
 			continue
 		}
 		if ma.Type == labels.MatchEqual && !m.Has(ma.Name) {
-			m = labels.NewBuilder(m).Set(ma.Name, ma.Value).Labels()
+			m = labels.NewBuilder(m).Set(ma.Name, ma.Value).Labels(nil)
 		} else {
 			empty = append(empty, ma.Name)
 		}
 	}
 
 	for _, v := range empty {
-		m = labels.NewBuilder(m).Del(v).Labels()
+		m = labels.NewBuilder(m).Del(v).Labels(nil)
 	}
 	return m
 }
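The FunctionCall comment above spells out the calling convention for PromQL function implementations. The sketch below is a hypothetical function written against that contract as if from inside the promql package; funcDouble does not exist in this diff, and the Sample/Point layout is the one used elsewhere in this commit:

```go
package promql

import (
	"github.com/prometheus/prometheus/promql/parser"
)

// funcDouble is an illustrative instant-vector function following the
// FunctionCall contract: it reads the evaluated argument from vals,
// accumulates results in enh.Out, and does not set timestamps.
func funcDouble(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
	vec := vals[0].(Vector) // first evaluated argument: an instant vector
	for _, sample := range vec {
		enh.Out = append(enh.Out, Sample{
			Metric: dropMetricName(sample.Metric), // the value changes, so __name__ is dropped
			Point:  Point{V: sample.V * 2},
		})
	}
	return enh.Out
}
```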
@@ -28,13 +28,12 @@ import (
 // or a chain of function definitions (e.g. String(), PromQLExpr(), etc.) convention is
 // to list them as follows:
 //
-// - Statements
-// - statement types (alphabetical)
-// - ...
-// - Expressions
-// - expression types (alphabetical)
-// - ...
-//
+// - Statements
+// - statement types (alphabetical)
+// - ...
+// - Expressions
+// - expression types (alphabetical)
+// - ...
 type Node interface {
 	// String representation of the node that returns the given node when parsed
 	// as part of a valid query.
@@ -68,6 +67,8 @@ type EvalStmt struct {
 	Start, End time.Time
 	// Time between two evaluated instants for the range [Start:End].
 	Interval time.Duration
+	// Lookback delta to use for this evaluation.
+	LookbackDelta time.Duration
 }
 
 func (*EvalStmt) PromQLStmt() {}
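EvalStmt gains a per-statement LookbackDelta. Presumably a zero value means "use the engine default", which is how the lookback-delta test earlier in this diff behaves; a small sketch of that assumed precedence (resolveLookbackDelta is an illustrative name, not code from the commit):

```go
package main

import (
	"fmt"
	"time"
)

// resolveLookbackDelta illustrates the assumed precedence: a non-zero
// per-query value wins, otherwise the engine-wide default applies.
func resolveLookbackDelta(queryDelta, engineDefault time.Duration) time.Duration {
	if queryDelta > 0 {
		return queryDelta
	}
	return engineDefault
}

func main() {
	const engineDefault = 5 * time.Minute
	fmt.Println(resolveLookbackDelta(0, engineDefault))              // 5m0s
	fmt.Println(resolveLookbackDelta(20*time.Minute, engineDefault)) // 20m0s
}
```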
@@ -3603,7 +3603,7 @@ var testSeries = []struct {
 }{
 	{
 		input:          `{} 1 2 3`,
-		expectedMetric: labels.Labels{},
+		expectedMetric: labels.EmptyLabels(),
 		expectedValues: newSeq(1, 2, 3),
 	}, {
 		input:          `{a="b"} -1 2 3`,
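The test now builds empty label sets with labels.EmptyLabels() instead of a labels.Labels{} literal, which keeps callers independent of the concrete representation behind labels.Labels. A small usage sketch against the model/labels API used in this branch:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// Preferred: EmptyLabels works regardless of how Labels is represented internally.
	empty := labels.EmptyLabels()
	named := labels.FromStrings("__name__", "metric", "job", "1")

	fmt.Println(empty.String())   // {}
	fmt.Println(named.Get("job")) // 1
}
```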
@ -37,25 +37,25 @@ func TestAggregateExprPretty(t *testing.T) {
|
|||
},
|
||||
{
|
||||
in: `sum without(job,foo) (task:errors:rate10s{job="s"})`,
|
||||
out: `sum without(job, foo) (
|
||||
out: `sum without (job, foo) (
|
||||
task:errors:rate10s{job="s"}
|
||||
)`,
|
||||
},
|
||||
{
|
||||
in: `sum(task:errors:rate10s{job="s"}) without(job,foo)`,
|
||||
out: `sum without(job, foo) (
|
||||
out: `sum without (job, foo) (
|
||||
task:errors:rate10s{job="s"}
|
||||
)`,
|
||||
},
|
||||
{
|
||||
in: `sum by(job,foo) (task:errors:rate10s{job="s"})`,
|
||||
out: `sum by(job, foo) (
|
||||
out: `sum by (job, foo) (
|
||||
task:errors:rate10s{job="s"}
|
||||
)`,
|
||||
},
|
||||
{
|
||||
in: `sum (task:errors:rate10s{job="s"}) by(job,foo)`,
|
||||
out: `sum by(job, foo) (
|
||||
out: `sum by (job, foo) (
|
||||
task:errors:rate10s{job="s"}
|
||||
)`,
|
||||
},
|
||||
|
@ -68,17 +68,17 @@ func TestAggregateExprPretty(t *testing.T) {
|
|||
},
|
||||
{
|
||||
in: `sum by(job,foo) (sum by(job,foo) (task:errors:rate10s{job="s"}))`,
|
||||
out: `sum by(job, foo) (
|
||||
sum by(job, foo) (
|
||||
out: `sum by (job, foo) (
|
||||
sum by (job, foo) (
|
||||
task:errors:rate10s{job="s"}
|
||||
)
|
||||
)`,
|
||||
},
|
||||
{
|
||||
in: `sum by(job,foo) (sum by(job,foo) (sum by(job,foo) (task:errors:rate10s{job="s"})))`,
|
||||
out: `sum by(job, foo) (
|
||||
sum by(job, foo) (
|
||||
sum by(job, foo) (
|
||||
out: `sum by (job, foo) (
|
||||
sum by (job, foo) (
|
||||
sum by (job, foo) (
|
||||
task:errors:rate10s{job="s"}
|
||||
)
|
||||
)
|
||||
|
@ -87,8 +87,8 @@ func TestAggregateExprPretty(t *testing.T) {
|
|||
{
|
||||
in: `sum by(job,foo)
|
||||
(sum by(job,foo) (task:errors:rate10s{job="s"}))`,
|
||||
out: `sum by(job, foo) (
|
||||
sum by(job, foo) (
|
||||
out: `sum by (job, foo) (
|
||||
sum by (job, foo) (
|
||||
task:errors:rate10s{job="s"}
|
||||
)
|
||||
)`,
|
||||
|
@ -96,8 +96,8 @@ func TestAggregateExprPretty(t *testing.T) {
|
|||
{
|
||||
in: `sum by(job,foo)
|
||||
(sum(task:errors:rate10s{job="s"}) without(job,foo))`,
|
||||
out: `sum by(job, foo) (
|
||||
sum without(job, foo) (
|
||||
out: `sum by (job, foo) (
|
||||
sum without (job, foo) (
|
||||
task:errors:rate10s{job="s"}
|
||||
)
|
||||
)`,
|
||||
|
@ -106,8 +106,8 @@ func TestAggregateExprPretty(t *testing.T) {
|
|||
in: `sum by(job,foo) # Comment 1.
|
||||
(sum by(job,foo) ( # Comment 2.
|
||||
task:errors:rate10s{job="s"}))`,
|
||||
out: `sum by(job, foo) (
|
||||
sum by(job, foo) (
|
||||
out: `sum by (job, foo) (
|
||||
sum by (job, foo) (
|
||||
task:errors:rate10s{job="s"}
|
||||
)
|
||||
)`,
|
||||
|
@ -139,7 +139,7 @@ func TestBinaryExprPretty(t *testing.T) {
|
|||
{
|
||||
in: `a + ignoring(job) b`,
|
||||
out: ` a
|
||||
+ ignoring(job)
|
||||
+ ignoring (job)
|
||||
b`,
|
||||
},
|
||||
{
|
||||
|
@ -175,19 +175,21 @@ func TestBinaryExprPretty(t *testing.T) {
|
|||
{
|
||||
in: `foo_1 + ignoring(foo) foo_2 + ignoring(job) group_left foo_3 + on(instance) group_right foo_4`,
|
||||
out: ` foo_1
|
||||
+ ignoring(foo)
|
||||
+ ignoring (foo)
|
||||
foo_2
|
||||
+ ignoring(job) group_left()
|
||||
+ ignoring (job) group_left ()
|
||||
foo_3
|
||||
+ on(instance) group_right()
|
||||
+ on (instance) group_right ()
|
||||
foo_4`,
|
||||
},
|
||||
}
|
||||
for _, test := range inputs {
|
||||
expr, err := ParseExpr(test.in)
|
||||
require.NoError(t, err)
|
||||
t.Run(test.in, func(t *testing.T) {
|
||||
expr, err := ParseExpr(test.in)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, test.out, Prettify(expr))
|
||||
require.Equal(t, test.out, Prettify(expr))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -560,7 +562,7 @@ or
|
|||
},
|
||||
{
|
||||
in: `min by (job, integration) (rate(alertmanager_notifications_failed_total{job="alertmanager", integration=~".*"}[5m]) / rate(alertmanager_notifications_total{job="alertmanager", integration="~.*"}[5m])) > 0.01`,
|
||||
out: ` min by(job, integration) (
|
||||
out: ` min by (job, integration) (
|
||||
rate(
|
||||
alertmanager_notifications_failed_total{integration=~".*",job="alertmanager"}[5m]
|
||||
)
|
||||
|
@ -575,7 +577,7 @@ or
|
|||
{
|
||||
in: `(count by (job) (changes(process_start_time_seconds{job="alertmanager"}[10m]) > 4) / count by (job) (up{job="alertmanager"})) >= 0.5`,
|
||||
out: ` (
|
||||
count by(job) (
|
||||
count by (job) (
|
||||
changes(
|
||||
process_start_time_seconds{job="alertmanager"}[10m]
|
||||
)
|
||||
|
@ -583,7 +585,7 @@ or
|
|||
4
|
||||
)
|
||||
/
|
||||
count by(job) (
|
||||
count by (job) (
|
||||
up{job="alertmanager"}
|
||||
)
|
||||
)
|
||||
|
@ -630,7 +632,7 @@ func TestUnaryPretty(t *testing.T) {
|
|||
in: `-histogram_quantile(0.99, sum by (le) (rate(foo[1m])))`,
|
||||
out: `-histogram_quantile(
|
||||
0.99,
|
||||
sum by(le) (
|
||||
sum by (le) (
|
||||
rate(
|
||||
foo[1m]
|
||||
)
|
||||
|
@ -659,8 +661,10 @@ func TestUnaryPretty(t *testing.T) {
|
|||
},
|
||||
}
|
||||
for _, test := range inputs {
|
||||
expr, err := ParseExpr(test.in)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, test.out, Prettify(expr))
|
||||
t.Run(test.in, func(t *testing.T) {
|
||||
expr, err := ParseExpr(test.in)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, test.out, Prettify(expr))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -77,9 +77,9 @@ func (node *AggregateExpr) getAggOpStr() string {
 
 	switch {
 	case node.Without:
-		aggrString += fmt.Sprintf(" without(%s) ", strings.Join(node.Grouping, ", "))
+		aggrString += fmt.Sprintf(" without (%s) ", strings.Join(node.Grouping, ", "))
 	case len(node.Grouping) > 0:
-		aggrString += fmt.Sprintf(" by(%s) ", strings.Join(node.Grouping, ", "))
+		aggrString += fmt.Sprintf(" by (%s) ", strings.Join(node.Grouping, ", "))
 	}
 
 	return aggrString
@@ -103,14 +103,14 @@ func (node *BinaryExpr) getMatchingStr() string {
 		if vm.On {
 			vmTag = "on"
 		}
-		matching = fmt.Sprintf(" %s(%s)", vmTag, strings.Join(vm.MatchingLabels, ", "))
+		matching = fmt.Sprintf(" %s (%s)", vmTag, strings.Join(vm.MatchingLabels, ", "))
 
 		if vm.Card == CardManyToOne || vm.Card == CardOneToMany {
 			vmCard := "right"
 			if vm.Card == CardManyToOne {
 				vmCard = "left"
 			}
-			matching += fmt.Sprintf(" group_%s(%s)", vmCard, strings.Join(vm.Include, ", "))
+			matching += fmt.Sprintf(" group_%s (%s)", vmCard, strings.Join(vm.Include, ", "))
 		}
 	}
 	return matching
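getAggOpStr and getMatchingStr now emit a space before the parenthesised label list, so the canonical rendering becomes `sum by (job)` rather than `sum by(job)`, and likewise for on, ignoring, group_left and group_right. A quick round-trip sketch using this branch's parser package; the printed form follows the expectations in the test changes nearby:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Parse an expression written in the old compact style.
	expr, err := parser.ParseExpr(`sum by(job,foo) (task:errors:rate10s{job="s"})`)
	if err != nil {
		panic(err)
	}
	// With this change the canonical String() form carries the extra space.
	fmt.Println(expr.String()) // sum by (job, foo) (task:errors:rate10s{job="s"})
}
```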
@ -33,13 +33,16 @@ func TestExprString(t *testing.T) {
|
|||
out: `sum(task:errors:rate10s{job="s"})`,
|
||||
},
|
||||
{
|
||||
in: `sum by(code) (task:errors:rate10s{job="s"})`,
|
||||
in: `sum by(code) (task:errors:rate10s{job="s"})`,
|
||||
out: `sum by (code) (task:errors:rate10s{job="s"})`,
|
||||
},
|
||||
{
|
||||
in: `sum without() (task:errors:rate10s{job="s"})`,
|
||||
in: `sum without() (task:errors:rate10s{job="s"})`,
|
||||
out: `sum without () (task:errors:rate10s{job="s"})`,
|
||||
},
|
||||
{
|
||||
in: `sum without(instance) (task:errors:rate10s{job="s"})`,
|
||||
in: `sum without(instance) (task:errors:rate10s{job="s"})`,
|
||||
out: `sum without (instance) (task:errors:rate10s{job="s"})`,
|
||||
},
|
||||
{
|
||||
in: `topk(5, task:errors:rate10s{job="s"})`,
|
||||
|
@ -48,26 +51,32 @@ func TestExprString(t *testing.T) {
|
|||
in: `count_values("value", task:errors:rate10s{job="s"})`,
|
||||
},
|
||||
{
|
||||
in: `a - on() c`,
|
||||
in: `a - on() c`,
|
||||
out: `a - on () c`,
|
||||
},
|
||||
{
|
||||
in: `a - on(b) c`,
|
||||
in: `a - on(b) c`,
|
||||
out: `a - on (b) c`,
|
||||
},
|
||||
{
|
||||
in: `a - on(b) group_left(x) c`,
|
||||
in: `a - on(b) group_left(x) c`,
|
||||
out: `a - on (b) group_left (x) c`,
|
||||
},
|
||||
{
|
||||
in: `a - on(b) group_left(x, y) c`,
|
||||
in: `a - on(b) group_left(x, y) c`,
|
||||
out: `a - on (b) group_left (x, y) c`,
|
||||
},
|
||||
{
|
||||
in: `a - on(b) group_left c`,
|
||||
out: `a - on(b) group_left() c`,
|
||||
out: `a - on (b) group_left () c`,
|
||||
},
|
||||
{
|
||||
in: `a - on(b) group_left() (c)`,
|
||||
in: `a - on(b) group_left() (c)`,
|
||||
out: `a - on (b) group_left () (c)`,
|
||||
},
|
||||
{
|
||||
in: `a - ignoring(b) c`,
|
||||
in: `a - ignoring(b) c`,
|
||||
out: `a - ignoring (b) c`,
|
||||
},
|
||||
{
|
||||
in: `a - ignoring() c`,
|
||||
|
|
|
@ -64,6 +64,7 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) {
|
|||
level.Error(logger).Log("msg", "Failed to open query log file", "err", err)
|
||||
return
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
brokenJSON := make([]byte, filesize)
|
||||
_, err = fd.Read(brokenJSON)
|
||||
|
|
|
@ -785,7 +785,7 @@ func (ll *LazyLoader) QueryEngine() *Engine {
|
|||
|
||||
// Queryable allows querying the LazyLoader's data.
|
||||
// Note: only the samples till the max timestamp used
|
||||
// in `WithSamplesTill` can be queried.
|
||||
// in `WithSamplesTill` can be queried.
|
||||
func (ll *LazyLoader) Queryable() storage.Queryable {
|
||||
return ll.storage
|
||||
}
|
||||
|
|
|
@ -138,15 +138,22 @@ func (vec Vector) String() string {
|
|||
// Such a behavior is semantically undefined
|
||||
// https://github.com/prometheus/prometheus/issues/4562
|
||||
func (vec Vector) ContainsSameLabelset() bool {
|
||||
l := make(map[uint64]struct{}, len(vec))
|
||||
for _, s := range vec {
|
||||
hash := s.Metric.Hash()
|
||||
if _, ok := l[hash]; ok {
|
||||
return true
|
||||
switch len(vec) {
|
||||
case 0, 1:
|
||||
return false
|
||||
case 2:
|
||||
return vec[0].Metric.Hash() == vec[1].Metric.Hash()
|
||||
default:
|
||||
l := make(map[uint64]struct{}, len(vec))
|
||||
for _, ss := range vec {
|
||||
hash := ss.Metric.Hash()
|
||||
if _, ok := l[hash]; ok {
|
||||
return true
|
||||
}
|
||||
l[hash] = struct{}{}
|
||||
}
|
||||
l[hash] = struct{}{}
|
||||
return false
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Matrix is a slice of Series that implements sort.Interface and
|
||||
|
@ -181,15 +188,22 @@ func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
|
|||
// Such a behavior is semantically undefined.
|
||||
// https://github.com/prometheus/prometheus/issues/4562
|
||||
func (m Matrix) ContainsSameLabelset() bool {
|
||||
l := make(map[uint64]struct{}, len(m))
|
||||
for _, ss := range m {
|
||||
hash := ss.Metric.Hash()
|
||||
if _, ok := l[hash]; ok {
|
||||
return true
|
||||
switch len(m) {
|
||||
case 0, 1:
|
||||
return false
|
||||
case 2:
|
||||
return m[0].Metric.Hash() == m[1].Metric.Hash()
|
||||
default:
|
||||
l := make(map[uint64]struct{}, len(m))
|
||||
for _, ss := range m {
|
||||
hash := ss.Metric.Hash()
|
||||
if _, ok := l[hash]; ok {
|
||||
return true
|
||||
}
|
||||
l[hash] = struct{}{}
|
||||
}
|
||||
l[hash] = struct{}{}
|
||||
return false
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Result holds the resulting value of an execution or an error
|
||||
|
|
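Both ContainsSameLabelset implementations now short-circuit the 0-, 1- and 2-element cases before building a hash set. A standalone sketch of the same shape over plain strings (illustrative only, not the promql code):

```go
package main

import "fmt"

// containsDuplicate mirrors the structure of the new ContainsSameLabelset:
// trivially false for 0 or 1 items, a single comparison for 2, and a set for
// anything longer, so the common small cases allocate nothing.
func containsDuplicate(keys []string) bool {
	switch len(keys) {
	case 0, 1:
		return false
	case 2:
		return keys[0] == keys[1]
	default:
		seen := make(map[string]struct{}, len(keys))
		for _, k := range keys {
			if _, ok := seen[k]; ok {
				return true
			}
			seen[k] = struct{}{}
		}
		return false
	}
}

func main() {
	fmt.Println(containsDuplicate([]string{"a"}))           // false
	fmt.Println(containsDuplicate([]string{"a", "a"}))      // true
	fmt.Println(containsDuplicate([]string{"a", "b", "a"})) // true
}
```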
promql/value_test.go (new file, 110 lines)
|
@ -0,0 +1,110 @@
|
|||
// Copyright 2022 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package promql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
)
|
||||
|
||||
func TestVector_ContainsSameLabelset(t *testing.T) {
|
||||
for name, tc := range map[string]struct {
|
||||
vector Vector
|
||||
expected bool
|
||||
}{
|
||||
"empty vector": {
|
||||
vector: Vector{},
|
||||
expected: false,
|
||||
},
|
||||
"vector with one series": {
|
||||
vector: Vector{
|
||||
{Metric: labels.FromStrings("lbl", "a")},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
"vector with two different series": {
|
||||
vector: Vector{
|
||||
{Metric: labels.FromStrings("lbl", "a")},
|
||||
{Metric: labels.FromStrings("lbl", "b")},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
"vector with two equal series": {
|
||||
vector: Vector{
|
||||
{Metric: labels.FromStrings("lbl", "a")},
|
||||
{Metric: labels.FromStrings("lbl", "a")},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
"vector with three series, two equal": {
|
||||
vector: Vector{
|
||||
{Metric: labels.FromStrings("lbl", "a")},
|
||||
{Metric: labels.FromStrings("lbl", "b")},
|
||||
{Metric: labels.FromStrings("lbl", "a")},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
} {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
require.Equal(t, tc.expected, tc.vector.ContainsSameLabelset())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMatrix_ContainsSameLabelset(t *testing.T) {
|
||||
for name, tc := range map[string]struct {
|
||||
matrix Matrix
|
||||
expected bool
|
||||
}{
|
||||
"empty matrix": {
|
||||
matrix: Matrix{},
|
||||
expected: false,
|
||||
},
|
||||
"matrix with one series": {
|
||||
matrix: Matrix{
|
||||
{Metric: labels.FromStrings("lbl", "a")},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
"matrix with two different series": {
|
||||
matrix: Matrix{
|
||||
{Metric: labels.FromStrings("lbl", "a")},
|
||||
{Metric: labels.FromStrings("lbl", "b")},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
"matrix with two equal series": {
|
||||
matrix: Matrix{
|
||||
{Metric: labels.FromStrings("lbl", "a")},
|
||||
{Metric: labels.FromStrings("lbl", "a")},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
"matrix with three series, two equal": {
|
||||
matrix: Matrix{
|
||||
{Metric: labels.FromStrings("lbl", "a")},
|
||||
{Metric: labels.FromStrings("lbl", "b")},
|
||||
{Metric: labels.FromStrings("lbl", "a")},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
} {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
require.Equal(t, tc.expected, tc.matrix.ContainsSameLabelset())
|
||||
})
|
||||
}
|
||||
}
|
|
@@ -24,7 +24,8 @@ import (
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	"github.com/prometheus/common/model"
-	yaml "gopkg.in/yaml.v2"
+	"go.uber.org/atomic"
+	"gopkg.in/yaml.v2"
 
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/rulefmt"
@@ -121,17 +122,17 @@ type AlertingRule struct {
 	externalURL string
 	// true if old state has been restored. We start persisting samples for ALERT_FOR_STATE
 	// only after the restoration.
-	restored bool
-	// Protects the below.
-	mtx sync.Mutex
+	restored *atomic.Bool
 	// Time in seconds taken to evaluate rule.
-	evaluationDuration time.Duration
+	evaluationDuration *atomic.Duration
 	// Timestamp of last evaluation of rule.
-	evaluationTimestamp time.Time
+	evaluationTimestamp *atomic.Time
 	// The health of the alerting rule.
-	health RuleHealth
+	health *atomic.String
 	// The last error seen by the alerting rule.
-	lastError error
+	lastError *atomic.Error
+	// activeMtx Protects the `active` map.
+	activeMtx sync.Mutex
 	// A map of alerts which are currently active (Pending or Firing), keyed by
 	// the fingerprint of the labelset they correspond to.
 	active map[uint64]*Alert
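The alerting rule's scalar state moves from fields guarded by a rule-wide mutex to go.uber.org/atomic wrappers, leaving a mutex only for the active-alerts map; the new TestAlertingRuleQueryInTemplate later in this diff checks that readers such as Health() no longer block while Eval expands a template that runs a query. A minimal sketch of the accessor pattern, assuming only the go.uber.org/atomic API (ruleState is an illustrative type, not the real struct):

```go
package main

import (
	"fmt"
	"time"

	"go.uber.org/atomic"
)

// ruleState shows the shape of the change: each field is its own atomic value,
// so readers never contend with a writer holding a struct-wide mutex.
type ruleState struct {
	health              *atomic.String
	lastError           *atomic.Error
	evaluationTimestamp *atomic.Time
	evaluationDuration  *atomic.Duration
	restored            *atomic.Bool
}

func newRuleState() *ruleState {
	return &ruleState{
		health:              atomic.NewString("unknown"),
		lastError:           atomic.NewError(nil),
		evaluationTimestamp: atomic.NewTime(time.Time{}),
		evaluationDuration:  atomic.NewDuration(0),
		restored:            atomic.NewBool(false),
	}
}

func main() {
	s := newRuleState()
	s.health.Store("ok")
	s.evaluationDuration.Store(125 * time.Millisecond)

	fmt.Println(s.health.Load(), s.evaluationDuration.Load(), s.restored.Load())
	// Output: ok 125ms false
}
```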
@ -151,17 +152,20 @@ func NewAlertingRule(
|
|||
}
|
||||
|
||||
return &AlertingRule{
|
||||
name: name,
|
||||
vector: vec,
|
||||
holdDuration: hold,
|
||||
labels: labels,
|
||||
annotations: annotations,
|
||||
externalLabels: el,
|
||||
externalURL: externalURL,
|
||||
health: HealthUnknown,
|
||||
active: map[uint64]*Alert{},
|
||||
logger: logger,
|
||||
restored: restored,
|
||||
name: name,
|
||||
vector: vec,
|
||||
holdDuration: hold,
|
||||
labels: labels,
|
||||
annotations: annotations,
|
||||
externalLabels: el,
|
||||
externalURL: externalURL,
|
||||
active: map[uint64]*Alert{},
|
||||
logger: logger,
|
||||
restored: atomic.NewBool(restored),
|
||||
health: atomic.NewString(string(HealthUnknown)),
|
||||
evaluationTimestamp: atomic.NewTime(time.Time{}),
|
||||
evaluationDuration: atomic.NewDuration(0),
|
||||
lastError: atomic.NewError(nil),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -172,30 +176,22 @@ func (r *AlertingRule) Name() string {
|
|||
|
||||
// SetLastError sets the current error seen by the alerting rule.
|
||||
func (r *AlertingRule) SetLastError(err error) {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
r.lastError = err
|
||||
r.lastError.Store(err)
|
||||
}
|
||||
|
||||
// LastError returns the last error seen by the alerting rule.
|
||||
func (r *AlertingRule) LastError() error {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
return r.lastError
|
||||
return r.lastError.Load()
|
||||
}
|
||||
|
||||
// SetHealth sets the current health of the alerting rule.
|
||||
func (r *AlertingRule) SetHealth(health RuleHealth) {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
r.health = health
|
||||
r.health.Store(string(health))
|
||||
}
|
||||
|
||||
// Health returns the current health of the alerting rule.
|
||||
func (r *AlertingRule) Health() RuleHealth {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
return r.health
|
||||
return RuleHealth(r.health.String())
|
||||
}
|
||||
|
||||
// Query returns the query expression of the alerting rule.
|
||||
|
@ -230,7 +226,7 @@ func (r *AlertingRule) sample(alert *Alert, ts time.Time) promql.Sample {
|
|||
lb.Set(alertStateLabel, alert.State.String())
|
||||
|
||||
s := promql.Sample{
|
||||
Metric: lb.Labels(),
|
||||
Metric: lb.Labels(nil),
|
||||
Point: promql.Point{T: timestamp.FromTime(ts), V: 1},
|
||||
}
|
||||
return s
|
||||
|
@ -248,7 +244,7 @@ func (r *AlertingRule) forStateSample(alert *Alert, ts time.Time, v float64) pro
|
|||
lb.Set(labels.AlertName, r.name)
|
||||
|
||||
s := promql.Sample{
|
||||
Metric: lb.Labels(),
|
||||
Metric: lb.Labels(nil),
|
||||
Point: promql.Point{T: timestamp.FromTime(ts), V: v},
|
||||
}
|
||||
return s
|
||||
|
@ -283,42 +279,32 @@ func (r *AlertingRule) QueryforStateSeries(alert *Alert, q storage.Querier) (sto
|
|||
|
||||
// SetEvaluationDuration updates evaluationDuration to the duration it took to evaluate the rule on its last evaluation.
|
||||
func (r *AlertingRule) SetEvaluationDuration(dur time.Duration) {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
r.evaluationDuration = dur
|
||||
r.evaluationDuration.Store(dur)
|
||||
}
|
||||
|
||||
// GetEvaluationDuration returns the time in seconds it took to evaluate the alerting rule.
|
||||
func (r *AlertingRule) GetEvaluationDuration() time.Duration {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
return r.evaluationDuration
|
||||
return r.evaluationDuration.Load()
|
||||
}
|
||||
|
||||
// SetEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule was last evaluated.
|
||||
func (r *AlertingRule) SetEvaluationTimestamp(ts time.Time) {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
r.evaluationTimestamp = ts
|
||||
r.evaluationTimestamp.Store(ts)
|
||||
}
|
||||
|
||||
// GetEvaluationTimestamp returns the time the evaluation took place.
|
||||
func (r *AlertingRule) GetEvaluationTimestamp() time.Time {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
return r.evaluationTimestamp
|
||||
return r.evaluationTimestamp.Load()
|
||||
}
|
||||
|
||||
// SetRestored updates the restoration state of the alerting rule.
|
||||
func (r *AlertingRule) SetRestored(restored bool) {
|
||||
r.restored = restored
|
||||
r.restored.Store(restored)
|
||||
}
|
||||
|
||||
// Restored returns the restoration state of the alerting rule.
|
||||
func (r *AlertingRule) Restored() bool {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
return r.restored
|
||||
return r.restored.Load()
|
||||
}
|
||||
|
||||
// resolvedRetention is the duration for which a resolved alert instance
|
||||
|
@ -333,9 +319,6 @@ func (r *AlertingRule) Eval(ctx context.Context, evalDelay time.Duration, ts tim
|
|||
return nil, err
|
||||
}
|
||||
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
|
||||
// Create pending alerts for any new vector elements in the alert expression
|
||||
// or update the expression value for existing elements.
|
||||
resultFPs := map[uint64]struct{}{}
|
||||
|
@ -390,7 +373,7 @@ func (r *AlertingRule) Eval(ctx context.Context, evalDelay time.Duration, ts tim
|
|||
annotations = append(annotations, labels.Label{Name: a.Name, Value: expand(a.Value)})
|
||||
}
|
||||
|
||||
lbs := lb.Labels()
|
||||
lbs := lb.Labels(nil)
|
||||
h := lbs.Hash()
|
||||
resultFPs[h] = struct{}{}
|
||||
|
||||
|
@ -407,6 +390,9 @@ func (r *AlertingRule) Eval(ctx context.Context, evalDelay time.Duration, ts tim
|
|||
}
|
||||
}
|
||||
|
||||
r.activeMtx.Lock()
|
||||
defer r.activeMtx.Unlock()
|
||||
|
||||
for h, a := range alerts {
|
||||
// Check whether we already have alerting state for the identifying label set.
|
||||
// Update the last value and annotations if so, create a new alert entry otherwise.
|
||||
|
@ -441,7 +427,7 @@ func (r *AlertingRule) Eval(ctx context.Context, evalDelay time.Duration, ts tim
|
|||
a.FiredAt = ts
|
||||
}
|
||||
|
||||
if r.restored {
|
||||
if r.restored.Load() {
|
||||
vec = append(vec, r.sample(a, ts.Add(-evalDelay)))
|
||||
vec = append(vec, r.forStateSample(a, ts.Add(-evalDelay), float64(a.ActiveAt.Unix())))
|
||||
}
|
||||
|
@ -458,8 +444,8 @@ func (r *AlertingRule) Eval(ctx context.Context, evalDelay time.Duration, ts tim
|
|||
// State returns the maximum state of alert instances for this rule.
|
||||
// StateFiring > StatePending > StateInactive
|
||||
func (r *AlertingRule) State() AlertState {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
r.activeMtx.Lock()
|
||||
defer r.activeMtx.Unlock()
|
||||
|
||||
maxState := StateInactive
|
||||
for _, a := range r.active {
|
||||
|
@ -484,8 +470,8 @@ func (r *AlertingRule) ActiveAlerts() []*Alert {
|
|||
// currentAlerts returns all instances of alerts for this rule. This may include
|
||||
// inactive alerts that were previously firing.
|
||||
func (r *AlertingRule) currentAlerts() []*Alert {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
r.activeMtx.Lock()
|
||||
defer r.activeMtx.Unlock()
|
||||
|
||||
alerts := make([]*Alert, 0, len(r.active))
|
||||
|
||||
|
@ -501,8 +487,8 @@ func (r *AlertingRule) currentAlerts() []*Alert {
|
|||
// and not on its copy.
|
||||
// If you want to run on a copy of alerts then don't use this, get the alerts from 'ActiveAlerts()'.
|
||||
func (r *AlertingRule) ForEachActiveAlert(f func(*Alert)) {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
r.activeMtx.Lock()
|
||||
defer r.activeMtx.Unlock()
|
||||
|
||||
for _, a := range r.active {
|
||||
f(a)
|
||||
|
|
|
@ -63,7 +63,7 @@ func TestAlertingRuleState(t *testing.T) {
|
|||
}
|
||||
|
||||
for i, test := range tests {
|
||||
rule := NewAlertingRule(test.name, nil, 0, nil, nil, nil, "", true, nil)
|
||||
rule := NewAlertingRule(test.name, nil, 0, labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil)
|
||||
rule.active = test.active
|
||||
got := rule.State()
|
||||
require.Equal(t, test.want, got, "test case %d unexpected AlertState, want:%d got:%d", i, test.want, got)
|
||||
|
@ -91,7 +91,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) {
|
|||
// If an alert is going back and forth between two label values it will never fire.
|
||||
// Instead, you should write two alerts with constant labels.
|
||||
labels.FromStrings("severity", "{{ if lt $value 80.0 }}critical{{ else }}warning{{ end }}"),
|
||||
nil, nil, "", true, nil,
|
||||
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
|
||||
)
|
||||
|
||||
results := []promql.Vector{
|
||||
|
@ -190,8 +190,8 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
|
|||
expr,
|
||||
time.Minute,
|
||||
labels.FromStrings("templated_label", "There are {{ len $externalLabels }} external Labels, of which foo is {{ $externalLabels.foo }}."),
|
||||
nil,
|
||||
nil,
|
||||
labels.EmptyLabels(),
|
||||
labels.EmptyLabels(),
|
||||
"",
|
||||
true, log.NewNopLogger(),
|
||||
)
|
||||
|
@ -200,7 +200,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
|
|||
expr,
|
||||
time.Minute,
|
||||
labels.FromStrings("templated_label", "There are {{ len $externalLabels }} external Labels, of which foo is {{ $externalLabels.foo }}."),
|
||||
nil,
|
||||
labels.EmptyLabels(),
|
||||
labels.FromStrings("foo", "bar", "dings", "bums"),
|
||||
"",
|
||||
true, log.NewNopLogger(),
|
||||
|
@ -284,8 +284,8 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) {
|
|||
expr,
|
||||
time.Minute,
|
||||
labels.FromStrings("templated_label", "The external URL is {{ $externalURL }}."),
|
||||
nil,
|
||||
nil,
|
||||
labels.EmptyLabels(),
|
||||
labels.EmptyLabels(),
|
||||
"",
|
||||
true, log.NewNopLogger(),
|
||||
)
|
||||
|
@ -294,8 +294,8 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) {
|
|||
expr,
|
||||
time.Minute,
|
||||
labels.FromStrings("templated_label", "The external URL is {{ $externalURL }}."),
|
||||
nil,
|
||||
nil,
|
||||
labels.EmptyLabels(),
|
||||
labels.EmptyLabels(),
|
||||
"http://localhost:1234",
|
||||
true, log.NewNopLogger(),
|
||||
)
|
||||
|
@ -378,8 +378,8 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) {
|
|||
expr,
|
||||
time.Minute,
|
||||
labels.FromStrings("empty_label", ""),
|
||||
nil,
|
||||
nil,
|
||||
labels.EmptyLabels(),
|
||||
labels.EmptyLabels(),
|
||||
"",
|
||||
true, log.NewNopLogger(),
|
||||
)
|
||||
|
@ -416,6 +416,83 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) {
|
|||
require.Equal(t, result, filteredRes)
|
||||
}
|
||||
|
||||
func TestAlertingRuleQueryInTemplate(t *testing.T) {
|
||||
suite, err := promql.NewTest(t, `
|
||||
load 1m
|
||||
http_requests{job="app-server", instance="0"} 70 85 70 70
|
||||
`)
|
||||
require.NoError(t, err)
|
||||
defer suite.Close()
|
||||
|
||||
require.NoError(t, suite.Run())
|
||||
|
||||
expr, err := parser.ParseExpr(`sum(http_requests) < 100`)
|
||||
require.NoError(t, err)
|
||||
|
||||
ruleWithQueryInTemplate := NewAlertingRule(
|
||||
"ruleWithQueryInTemplate",
|
||||
expr,
|
||||
time.Minute,
|
||||
labels.FromStrings("label", "value"),
|
||||
labels.FromStrings("templated_label", `{{- with "sort(sum(http_requests) by (instance))" | query -}}
|
||||
{{- range $i,$v := . -}}
|
||||
instance: {{ $v.Labels.instance }}, value: {{ printf "%.0f" $v.Value }};
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
`),
|
||||
labels.EmptyLabels(),
|
||||
"",
|
||||
true, log.NewNopLogger(),
|
||||
)
|
||||
evalTime := time.Unix(0, 0)
|
||||
|
||||
startQueryCh := make(chan struct{})
|
||||
getDoneCh := make(chan struct{})
|
||||
slowQueryFunc := func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) {
|
||||
if q == "sort(sum(http_requests) by (instance))" {
|
||||
// This is a minimum reproduction of issue 10703, expand template with query.
|
||||
close(startQueryCh)
|
||||
select {
|
||||
case <-getDoneCh:
|
||||
case <-time.After(time.Millisecond * 10):
|
||||
// Assert no blocking when template expanding.
|
||||
require.Fail(t, "unexpected blocking when template expanding.")
|
||||
}
|
||||
}
|
||||
return EngineQueryFunc(suite.QueryEngine(), suite.Storage())(ctx, q, ts)
|
||||
}
|
||||
go func() {
|
||||
<-startQueryCh
|
||||
_ = ruleWithQueryInTemplate.Health()
|
||||
_ = ruleWithQueryInTemplate.LastError()
|
||||
_ = ruleWithQueryInTemplate.GetEvaluationDuration()
|
||||
_ = ruleWithQueryInTemplate.GetEvaluationTimestamp()
|
||||
close(getDoneCh)
|
||||
}()
|
||||
_, err = ruleWithQueryInTemplate.Eval(
|
||||
suite.Context(), evalTime, slowQueryFunc, nil, 0,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func BenchmarkAlertingRuleAtomicField(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
rule := NewAlertingRule("bench", nil, 0, labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil)
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
for i := 0; i < b.N; i++ {
|
||||
rule.GetEvaluationTimestamp()
|
||||
}
|
||||
close(done)
|
||||
}()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
rule.SetEvaluationTimestamp(time.Now())
|
||||
}
|
||||
})
|
||||
<-done
|
||||
}
|
||||
|
||||
func TestAlertingRuleDuplicate(t *testing.T) {
|
||||
storage := teststorage.New(t)
|
||||
defer storage.Close()
|
||||
|
@ -439,8 +516,8 @@ func TestAlertingRuleDuplicate(t *testing.T) {
|
|||
expr,
|
||||
time.Minute,
|
||||
labels.FromStrings("test", "test"),
|
||||
nil,
|
||||
nil,
|
||||
labels.EmptyLabels(),
|
||||
labels.EmptyLabels(),
|
||||
"",
|
||||
true, log.NewNopLogger(),
|
||||
)
|
||||
|
@ -485,8 +562,8 @@ func TestAlertingRuleLimit(t *testing.T) {
|
|||
expr,
|
||||
time.Minute,
|
||||
labels.FromStrings("test", "test"),
|
||||
nil,
|
||||
nil,
|
||||
labels.EmptyLabels(),
|
||||
labels.EmptyLabels(),
|
||||
"",
|
||||
true, log.NewNopLogger(),
|
||||
)
|
||||
|
@ -557,13 +634,13 @@ func TestQueryForStateSeries(t *testing.T) {
|
|||
nil,
|
||||
time.Minute,
|
||||
labels.FromStrings("severity", "critical"),
|
||||
nil, nil, "", true, nil,
|
||||
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
|
||||
)
|
||||
|
||||
alert := &Alert{
|
||||
State: 0,
|
||||
Labels: nil,
|
||||
Annotations: nil,
|
||||
Labels: labels.EmptyLabels(),
|
||||
Annotations: labels.EmptyLabels(),
|
||||
Value: 0,
|
||||
ActiveAt: time.Time{},
|
||||
FiredAt: time.Time{},
|
||||
|
|
|
@ -28,8 +28,9 @@ import (
|
|||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/atomic"
|
||||
"go.uber.org/goleak"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/model/rulefmt"
|
||||
|
@ -65,7 +66,7 @@ func TestAlertingRule(t *testing.T) {
|
|||
expr,
|
||||
time.Minute,
|
||||
labels.FromStrings("severity", "{{\"c\"}}ritical"),
|
||||
nil, nil, "", true, nil,
|
||||
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
|
||||
)
|
||||
result := promql.Vector{
|
||||
{
|
||||
|
@ -210,7 +211,7 @@ func TestForStateAddSamples(t *testing.T) {
|
|||
expr,
|
||||
time.Minute,
|
||||
labels.FromStrings("severity", "{{\"c\"}}ritical"),
|
||||
nil, nil, "", true, nil,
|
||||
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
|
||||
)
|
||||
result := promql.Vector{
|
||||
{
|
||||
|
@ -386,7 +387,7 @@ func TestForStateRestore(t *testing.T) {
|
|||
expr,
|
||||
alertForDuration,
|
||||
labels.FromStrings("severity", "critical"),
|
||||
nil, nil, "", true, nil,
|
||||
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
|
||||
)
|
||||
|
||||
group := NewGroup(GroupOptions{
|
||||
|
@ -452,7 +453,7 @@ func TestForStateRestore(t *testing.T) {
|
|||
expr,
|
||||
alertForDuration,
|
||||
labels.FromStrings("severity", "critical"),
|
||||
nil, nil, "", false, nil,
|
||||
labels.EmptyLabels(), labels.EmptyLabels(), "", false, nil,
|
||||
)
|
||||
newGroup := NewGroup(GroupOptions{
|
||||
Name: "default",
|
||||
|
@ -625,36 +626,36 @@ func readSeriesSet(ss storage.SeriesSet) (map[string][]promql.Point, error) {
|
|||
func TestCopyState(t *testing.T) {
|
||||
oldGroup := &Group{
|
||||
rules: []Rule{
|
||||
NewAlertingRule("alert", nil, 0, nil, nil, nil, "", true, nil),
|
||||
NewRecordingRule("rule1", nil, nil),
|
||||
NewRecordingRule("rule2", nil, nil),
|
||||
NewRecordingRule("rule3", nil, labels.Labels{{Name: "l1", Value: "v1"}}),
|
||||
NewRecordingRule("rule3", nil, labels.Labels{{Name: "l1", Value: "v2"}}),
|
||||
NewRecordingRule("rule3", nil, labels.Labels{{Name: "l1", Value: "v3"}}),
|
||||
NewAlertingRule("alert2", nil, 0, labels.Labels{{Name: "l2", Value: "v1"}}, nil, nil, "", true, nil),
|
||||
NewAlertingRule("alert", nil, 0, labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
|
||||
NewRecordingRule("rule1", nil, labels.EmptyLabels()),
|
||||
NewRecordingRule("rule2", nil, labels.EmptyLabels()),
|
||||
NewRecordingRule("rule3", nil, labels.FromStrings("l1", "v1")),
|
||||
NewRecordingRule("rule3", nil, labels.FromStrings("l1", "v2")),
|
||||
NewRecordingRule("rule3", nil, labels.FromStrings("l1", "v3")),
|
||||
NewAlertingRule("alert2", nil, 0, labels.FromStrings("l2", "v1"), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
|
||||
},
|
||||
seriesInPreviousEval: []map[string]labels.Labels{
|
||||
{},
|
||||
{},
|
||||
{},
|
||||
{"r3a": labels.Labels{{Name: "l1", Value: "v1"}}},
|
||||
{"r3b": labels.Labels{{Name: "l1", Value: "v2"}}},
|
||||
{"r3c": labels.Labels{{Name: "l1", Value: "v3"}}},
|
||||
{"a2": labels.Labels{{Name: "l2", Value: "v1"}}},
|
||||
{"r3a": labels.FromStrings("l1", "v1")},
|
||||
{"r3b": labels.FromStrings("l1", "v2")},
|
||||
{"r3c": labels.FromStrings("l1", "v3")},
|
||||
{"a2": labels.FromStrings("l2", "v1")},
|
||||
},
|
||||
evaluationTime: time.Second,
|
||||
}
|
||||
oldGroup.rules[0].(*AlertingRule).active[42] = nil
|
||||
newGroup := &Group{
|
||||
rules: []Rule{
|
||||
NewRecordingRule("rule3", nil, labels.Labels{{Name: "l1", Value: "v0"}}),
|
||||
NewRecordingRule("rule3", nil, labels.Labels{{Name: "l1", Value: "v1"}}),
|
||||
NewRecordingRule("rule3", nil, labels.Labels{{Name: "l1", Value: "v2"}}),
|
||||
NewAlertingRule("alert", nil, 0, nil, nil, nil, "", true, nil),
|
||||
NewRecordingRule("rule1", nil, nil),
|
||||
NewAlertingRule("alert2", nil, 0, labels.Labels{{Name: "l2", Value: "v0"}}, nil, nil, "", true, nil),
|
||||
NewAlertingRule("alert2", nil, 0, labels.Labels{{Name: "l2", Value: "v1"}}, nil, nil, "", true, nil),
|
||||
NewRecordingRule("rule4", nil, nil),
|
||||
NewRecordingRule("rule3", nil, labels.FromStrings("l1", "v0")),
|
||||
NewRecordingRule("rule3", nil, labels.FromStrings("l1", "v1")),
|
||||
NewRecordingRule("rule3", nil, labels.FromStrings("l1", "v2")),
|
||||
NewAlertingRule("alert", nil, 0, labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
|
||||
NewRecordingRule("rule1", nil, labels.EmptyLabels()),
|
||||
NewAlertingRule("alert2", nil, 0, labels.FromStrings("l2", "v0"), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
|
||||
NewAlertingRule("alert2", nil, 0, labels.FromStrings("l2", "v1"), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
|
||||
NewRecordingRule("rule4", nil, labels.EmptyLabels()),
|
||||
},
|
||||
seriesInPreviousEval: make([]map[string]labels.Labels, 8),
|
||||
}
|
||||
|
@ -662,19 +663,19 @@ func TestCopyState(t *testing.T) {
|
|||
|
||||
want := []map[string]labels.Labels{
|
||||
nil,
|
||||
{"r3a": labels.Labels{{Name: "l1", Value: "v1"}}},
|
||||
{"r3b": labels.Labels{{Name: "l1", Value: "v2"}}},
|
||||
{"r3a": labels.FromStrings("l1", "v1")},
|
||||
{"r3b": labels.FromStrings("l1", "v2")},
|
||||
{},
|
||||
{},
|
||||
nil,
|
||||
{"a2": labels.Labels{{Name: "l2", Value: "v1"}}},
|
||||
{"a2": labels.FromStrings("l2", "v1")},
|
||||
nil,
|
||||
}
|
||||
require.Equal(t, want, newGroup.seriesInPreviousEval)
|
||||
require.Equal(t, oldGroup.rules[0], newGroup.rules[3])
|
||||
require.Equal(t, oldGroup.evaluationTime, newGroup.evaluationTime)
|
||||
require.Equal(t, oldGroup.lastEvaluation, newGroup.lastEvaluation)
|
||||
require.Equal(t, []labels.Labels{{{Name: "l1", Value: "v3"}}}, newGroup.staleSeries)
|
||||
require.Equal(t, []labels.Labels{labels.FromStrings("l1", "v3")}, newGroup.staleSeries)
|
||||
}
|
||||
|
||||
func TestDeletedRuleMarkedStale(t *testing.T) {
|
||||
|
@ -682,10 +683,10 @@ func TestDeletedRuleMarkedStale(t *testing.T) {
|
|||
defer st.Close()
|
||||
oldGroup := &Group{
|
||||
rules: []Rule{
|
||||
NewRecordingRule("rule1", nil, labels.Labels{{Name: "l1", Value: "v1"}}),
|
||||
NewRecordingRule("rule1", nil, labels.FromStrings("l1", "v1")),
|
||||
},
|
||||
seriesInPreviousEval: []map[string]labels.Labels{
|
||||
{"r1": labels.Labels{{Name: "l1", Value: "v1"}}},
|
||||
{"r1": labels.FromStrings("l1", "v1")},
|
||||
},
|
||||
}
|
||||
newGroup := &Group{
|
||||
|
@ -741,7 +742,7 @@ func TestUpdate(t *testing.T) {
|
|||
ruleManager.start()
|
||||
defer ruleManager.Stop()
|
||||
|
||||
err := ruleManager.Update(10*time.Second, files, nil, "", nil)
|
||||
err := ruleManager.Update(10*time.Second, files, labels.EmptyLabels(), "", nil)
|
||||
require.NoError(t, err)
|
||||
require.Greater(t, len(ruleManager.groups), 0, "expected non-empty rule groups")
|
||||
ogs := map[string]*Group{}
|
||||
|
@ -752,7 +753,7 @@ func TestUpdate(t *testing.T) {
|
|||
ogs[h] = g
|
||||
}
|
||||
|
||||
err = ruleManager.Update(10*time.Second, files, nil, "", nil)
|
||||
err = ruleManager.Update(10*time.Second, files, labels.EmptyLabels(), "", nil)
|
||||
require.NoError(t, err)
|
||||
for h, g := range ruleManager.groups {
|
||||
for _, actual := range g.seriesInPreviousEval {
|
||||
|
@ -771,7 +772,7 @@ func TestUpdate(t *testing.T) {
|
|||
defer os.Remove(tmpFile.Name())
|
||||
defer tmpFile.Close()
|
||||
|
||||
err = ruleManager.Update(10*time.Second, []string{tmpFile.Name()}, nil, "", nil)
|
||||
err = ruleManager.Update(10*time.Second, []string{tmpFile.Name()}, labels.EmptyLabels(), "", nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
for h, g := range ruleManager.groups {
|
||||
|
@ -958,7 +959,6 @@ func reloadRules(rgs *rulefmt.RuleGroups, t *testing.T, tmpFile *os.File, ruleMa
|
|||
|
||||
func reloadAndValidate(rgs *rulefmt.RuleGroups, t *testing.T, tmpFile *os.File, ruleManager *Manager, expected map[string]labels.Labels, ogs map[string]*Group) {
|
||||
reloadRules(rgs, t, tmpFile, ruleManager, 0)
|
||||
|
||||
for h, g := range ruleManager.groups {
|
||||
if ogs[h] == g {
|
||||
t.Fail()
|
||||
|
@ -993,7 +993,7 @@ func TestNotify(t *testing.T) {
|
|||
|
||||
expr, err := parser.ParseExpr("a > 1")
|
||||
require.NoError(t, err)
|
||||
rule := NewAlertingRule("aTooHigh", expr, 0, labels.Labels{}, labels.Labels{}, nil, "", true, log.NewNopLogger())
|
||||
rule := NewAlertingRule("aTooHigh", expr, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, log.NewNopLogger())
|
||||
group := NewGroup(GroupOptions{
|
||||
Name: "alert",
|
||||
Interval: time.Second,
|
||||
|
@ -1102,7 +1102,7 @@ func TestMetricsUpdate(t *testing.T) {
|
|||
}
|
||||
|
||||
for i, c := range cases {
|
||||
err := ruleManager.Update(time.Second, c.files, nil, "", nil)
|
||||
err := ruleManager.Update(time.Second, c.files, labels.EmptyLabels(), "", nil)
|
||||
require.NoError(t, err)
|
||||
time.Sleep(2 * time.Second)
|
||||
require.Equal(t, c.metrics, countMetrics(), "test %d: invalid count of metrics", i)
|
||||
|
@ -1176,7 +1176,7 @@ func TestGroupStalenessOnRemoval(t *testing.T) {
|
|||
|
||||
var totalStaleNaN int
|
||||
for i, c := range cases {
|
||||
err := ruleManager.Update(time.Second, c.files, nil, "", nil)
|
||||
err := ruleManager.Update(time.Second, c.files, labels.EmptyLabels(), "", nil)
|
||||
require.NoError(t, err)
|
||||
time.Sleep(3 * time.Second)
|
||||
totalStaleNaN += c.staleNaN
|
||||
|
@ -1218,11 +1218,11 @@ func TestMetricsStalenessOnManagerShutdown(t *testing.T) {
|
|||
}
|
||||
}()
|
||||
|
||||
err := ruleManager.Update(2*time.Second, files, nil, "", nil)
|
||||
err := ruleManager.Update(2*time.Second, files, labels.EmptyLabels(), "", nil)
|
||||
time.Sleep(4 * time.Second)
|
||||
require.NoError(t, err)
|
||||
start := time.Now()
|
||||
err = ruleManager.Update(3*time.Second, files[:0], nil, "", nil)
|
||||
err = ruleManager.Update(3*time.Second, files[:0], labels.EmptyLabels(), "", nil)
|
||||
require.NoError(t, err)
|
||||
ruleManager.Stop()
|
||||
stopped = true
|
||||
|
@ -1265,8 +1265,8 @@ func TestGroupHasAlertingRules(t *testing.T) {
|
|||
group: &Group{
|
||||
name: "HasAlertingRule",
|
||||
rules: []Rule{
|
||||
NewAlertingRule("alert", nil, 0, nil, nil, nil, "", true, nil),
|
||||
NewRecordingRule("record", nil, nil),
|
||||
NewAlertingRule("alert", nil, 0, labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil),
|
||||
NewRecordingRule("record", nil, labels.EmptyLabels()),
|
||||
},
|
||||
},
|
||||
want: true,
|
||||
|
@ -1282,7 +1282,7 @@ func TestGroupHasAlertingRules(t *testing.T) {
|
|||
group: &Group{
|
||||
name: "HasOnlyRecordingRule",
|
||||
rules: []Rule{
|
||||
NewRecordingRule("record", nil, nil),
|
||||
NewRecordingRule("record", nil, labels.EmptyLabels()),
|
||||
},
|
||||
},
|
||||
want: false,
|
||||
|
@@ -1628,17 +1628,20 @@ func TestUpdateMissedEvalMetrics(t *testing.T) {
m[1] = activeAlert

rule := &AlertingRule{
name: "HTTPRequestRateLow",
vector: expr,
holdDuration: 5 * time.Minute,
labels: labels.FromStrings("severity", "critical"),
annotations: nil,
externalLabels: nil,
externalURL: "",
health: HealthUnknown,
active: m,
logger: nil,
restored: true,
name: "HTTPRequestRateLow",
vector: expr,
holdDuration: 5 * time.Minute,
labels: labels.FromStrings("severity", "critical"),
annotations: labels.EmptyLabels(),
externalLabels: nil,
externalURL: "",
active: m,
logger: nil,
restored: atomic.NewBool(true),
health: atomic.NewString(string(HealthUnknown)),
evaluationTimestamp: atomic.NewTime(time.Time{}),
evaluationDuration: atomic.NewDuration(0),
lastError: atomic.NewError(nil),
}

group := NewGroup(GroupOptions{

@@ -17,10 +17,10 @@ import (
"context"
"fmt"
"net/url"
"sync"
"time"

yaml "gopkg.in/yaml.v2"
"go.uber.org/atomic"
"gopkg.in/yaml.v2"

"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/rulefmt"
@@ -33,25 +33,26 @@ type RecordingRule struct {
name string
vector parser.Expr
labels labels.Labels
// Protects the below.
mtx sync.Mutex
// The health of the recording rule.
health RuleHealth
health *atomic.String
// Timestamp of last evaluation of the recording rule.
evaluationTimestamp time.Time
evaluationTimestamp *atomic.Time
// The last error seen by the recording rule.
lastError error
lastError *atomic.Error
// Duration of how long it took to evaluate the recording rule.
evaluationDuration time.Duration
evaluationDuration *atomic.Duration
}

// NewRecordingRule returns a new recording rule.
func NewRecordingRule(name string, vector parser.Expr, lset labels.Labels) *RecordingRule {
return &RecordingRule{
name: name,
vector: vector,
health: HealthUnknown,
labels: lset,
name: name,
vector: vector,
labels: lset,
health: atomic.NewString(string(HealthUnknown)),
evaluationTimestamp: atomic.NewTime(time.Time{}),
evaluationDuration: atomic.NewDuration(0),
lastError: atomic.NewError(nil),
}
}
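The constructor above now seeds every mutable field with a go.uber.org/atomic wrapper instead of guarding plain fields with the shared mutex. A minimal, self-contained sketch of that pattern (the ruleState type and newRuleState are illustrative names, not part of the diff):

package main

import (
	"fmt"

	"go.uber.org/atomic"
)

// ruleState mirrors the pattern adopted above: each independently updated
// field gets its own atomic wrapper instead of sharing one mutex.
type ruleState struct {
	health    *atomic.String
	lastError *atomic.Error
}

func newRuleState() *ruleState {
	return &ruleState{
		health:    atomic.NewString("unknown"),
		lastError: atomic.NewError(nil),
	}
}

func main() {
	s := newRuleState()
	s.health.Store("ok") // safe to call concurrently with Load
	fmt.Println(s.health.Load(), s.lastError.Load())
}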
|
||||
|
||||
|
@ -88,7 +89,7 @@ func (rule *RecordingRule) Eval(ctx context.Context, evalDelay time.Duration, ts
|
|||
lb.Set(l.Name, l.Value)
|
||||
}
|
||||
|
||||
sample.Metric = lb.Labels()
|
||||
sample.Metric = lb.Labels(nil)
|
||||
}
|
||||
|
||||
// Check that the rule does not produce identical metrics after applying
|
||||
|
@ -124,56 +125,40 @@ func (rule *RecordingRule) String() string {
|
|||
|
||||
// SetEvaluationDuration updates evaluationDuration to the time in seconds it took to evaluate the rule on its last evaluation.
|
||||
func (rule *RecordingRule) SetEvaluationDuration(dur time.Duration) {
|
||||
rule.mtx.Lock()
|
||||
defer rule.mtx.Unlock()
|
||||
rule.evaluationDuration = dur
|
||||
rule.evaluationDuration.Store(dur)
|
||||
}
|
||||
|
||||
// SetLastError sets the current error seen by the recording rule.
|
||||
func (rule *RecordingRule) SetLastError(err error) {
|
||||
rule.mtx.Lock()
|
||||
defer rule.mtx.Unlock()
|
||||
rule.lastError = err
|
||||
rule.lastError.Store(err)
|
||||
}
|
||||
|
||||
// LastError returns the last error seen by the recording rule.
|
||||
func (rule *RecordingRule) LastError() error {
|
||||
rule.mtx.Lock()
|
||||
defer rule.mtx.Unlock()
|
||||
return rule.lastError
|
||||
return rule.lastError.Load()
|
||||
}
|
||||
|
||||
// SetHealth sets the current health of the recording rule.
|
||||
func (rule *RecordingRule) SetHealth(health RuleHealth) {
|
||||
rule.mtx.Lock()
|
||||
defer rule.mtx.Unlock()
|
||||
rule.health = health
|
||||
rule.health.Store(string(health))
|
||||
}
|
||||
|
||||
// Health returns the current health of the recording rule.
|
||||
func (rule *RecordingRule) Health() RuleHealth {
|
||||
rule.mtx.Lock()
|
||||
defer rule.mtx.Unlock()
|
||||
return rule.health
|
||||
return RuleHealth(rule.health.Load())
|
||||
}
|
||||
|
||||
// GetEvaluationDuration returns the time in seconds it took to evaluate the recording rule.
|
||||
func (rule *RecordingRule) GetEvaluationDuration() time.Duration {
|
||||
rule.mtx.Lock()
|
||||
defer rule.mtx.Unlock()
|
||||
return rule.evaluationDuration
|
||||
return rule.evaluationDuration.Load()
|
||||
}
|
||||
|
||||
// SetEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule was last evaluated.
|
||||
func (rule *RecordingRule) SetEvaluationTimestamp(ts time.Time) {
|
||||
rule.mtx.Lock()
|
||||
defer rule.mtx.Unlock()
|
||||
rule.evaluationTimestamp = ts
|
||||
rule.evaluationTimestamp.Store(ts)
|
||||
}
|
||||
|
||||
// GetEvaluationTimestamp returns the time the evaluation took place.
|
||||
func (rule *RecordingRule) GetEvaluationTimestamp() time.Time {
|
||||
rule.mtx.Lock()
|
||||
defer rule.mtx.Unlock()
|
||||
return rule.evaluationTimestamp
|
||||
return rule.evaluationTimestamp.Load()
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@ import (
|
|||
|
||||
"github.com/prometheus/prometheus/model/exemplar"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/model/metadata"
|
||||
"github.com/prometheus/prometheus/storage"
|
||||
)
|
||||
|
||||
|
@ -39,6 +40,11 @@ func (a nopAppender) Append(storage.SeriesRef, labels.Labels, int64, float64) (s
|
|||
func (a nopAppender) AppendExemplar(storage.SeriesRef, labels.Labels, exemplar.Exemplar) (storage.SeriesRef, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (a nopAppender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (a nopAppender) Commit() error { return nil }
|
||||
func (a nopAppender) Rollback() error { return nil }
|
||||
|
||||
|
@ -57,6 +63,8 @@ type collectResultAppender struct {
|
|||
rolledbackResult []sample
|
||||
pendingExemplars []exemplar.Exemplar
|
||||
resultExemplars []exemplar.Exemplar
|
||||
pendingMetadata []metadata.Metadata
|
||||
resultMetadata []metadata.Metadata
|
||||
}
|
||||
|
||||
func (a *collectResultAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
|
||||
|
@ -89,11 +97,25 @@ func (a *collectResultAppender) AppendExemplar(ref storage.SeriesRef, l labels.L
|
|||
return a.next.AppendExemplar(ref, l, e)
|
||||
}
|
||||
|
||||
func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
|
||||
a.pendingMetadata = append(a.pendingMetadata, m)
|
||||
if ref == 0 {
|
||||
ref = storage.SeriesRef(rand.Uint64())
|
||||
}
|
||||
if a.next == nil {
|
||||
return ref, nil
|
||||
}
|
||||
|
||||
return a.next.UpdateMetadata(ref, l, m)
|
||||
}
|
||||
|
||||
func (a *collectResultAppender) Commit() error {
|
||||
a.result = append(a.result, a.pendingResult...)
|
||||
a.resultExemplars = append(a.resultExemplars, a.pendingExemplars...)
|
||||
a.resultMetadata = append(a.resultMetadata, a.pendingMetadata...)
|
||||
a.pendingResult = nil
|
||||
a.pendingExemplars = nil
|
||||
a.pendingMetadata = nil
|
||||
if a.next == nil {
|
||||
return nil
|
||||
}
|
||||
|
|
|
@@ -124,10 +124,14 @@ func NewManager(o *Options, logger log.Logger, app storage.Appendable) *Manager

// Options are the configuration parameters to the scrape manager.
type Options struct {
ExtraMetrics bool
ExtraMetrics bool
NoDefaultPort bool
// Option used by downstream scraper users like OpenTelemetry Collector
// to help lookup metric metadata. Should be false for Prometheus.
PassMetadataInContext bool
// Option to enable the experimental in-memory metadata storage and append
// metadata to the WAL.
EnableMetadataStorage bool
// Option to increase the interval used by scrape manager to throttle target groups updates.
DiscoveryReloadInterval model.Duration
|
||||
|
||||
|
@ -207,7 +211,7 @@ func (m *Manager) reload() {
|
|||
level.Error(m.logger).Log("msg", "error reloading target set", "err", "invalid config id:"+setName)
|
||||
continue
|
||||
}
|
||||
sp, err := newScrapePool(scrapeConfig, m.append, m.jitterSeed, log.With(m.logger, "scrape_pool", setName), m.opts.ExtraMetrics, m.opts.PassMetadataInContext, m.opts.HTTPClientOptions)
|
||||
sp, err := newScrapePool(scrapeConfig, m.append, m.jitterSeed, log.With(m.logger, "scrape_pool", setName), m.opts)
|
||||
if err != nil {
|
||||
level.Error(m.logger).Log("msg", "error creating new scrape pool", "err", err, "scrape_pool", setName)
|
||||
continue
|
||||
|
|
|
@ -21,7 +21,7 @@ import (
|
|||
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/stretchr/testify/require"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/prometheus/prometheus/config"
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
|
@ -31,11 +31,12 @@ import (
|
|||
|
||||
func TestPopulateLabels(t *testing.T) {
|
||||
cases := []struct {
|
||||
in labels.Labels
|
||||
cfg *config.ScrapeConfig
|
||||
res labels.Labels
|
||||
resOrig labels.Labels
|
||||
err string
|
||||
in labels.Labels
|
||||
cfg *config.ScrapeConfig
|
||||
noDefaultPort bool
|
||||
res labels.Labels
|
||||
resOrig labels.Labels
|
||||
err string
|
||||
}{
|
||||
// Regular population of scrape config options.
|
||||
{
|
||||
|
@ -331,11 +332,104 @@ func TestPopulateLabels(t *testing.T) {
|
|||
resOrig: nil,
|
||||
err: "scrape timeout cannot be greater than scrape interval (\"2s\" > \"1s\")",
|
||||
},
|
||||
// Don't attach default port.
|
||||
{
|
||||
in: labels.FromMap(map[string]string{
|
||||
model.AddressLabel: "1.2.3.4",
|
||||
}),
|
||||
cfg: &config.ScrapeConfig{
|
||||
Scheme: "https",
|
||||
MetricsPath: "/metrics",
|
||||
JobName: "job",
|
||||
ScrapeInterval: model.Duration(time.Second),
|
||||
ScrapeTimeout: model.Duration(time.Second),
|
||||
},
|
||||
noDefaultPort: true,
|
||||
res: labels.FromMap(map[string]string{
|
||||
model.AddressLabel: "1.2.3.4",
|
||||
model.InstanceLabel: "1.2.3.4",
|
||||
model.SchemeLabel: "https",
|
||||
model.MetricsPathLabel: "/metrics",
|
||||
model.JobLabel: "job",
|
||||
model.ScrapeIntervalLabel: "1s",
|
||||
model.ScrapeTimeoutLabel: "1s",
|
||||
}),
|
||||
resOrig: labels.FromMap(map[string]string{
|
||||
model.AddressLabel: "1.2.3.4",
|
||||
model.SchemeLabel: "https",
|
||||
model.MetricsPathLabel: "/metrics",
|
||||
model.JobLabel: "job",
|
||||
model.ScrapeIntervalLabel: "1s",
|
||||
model.ScrapeTimeoutLabel: "1s",
|
||||
}),
|
||||
},
|
||||
// Remove default port (http).
|
||||
{
|
||||
in: labels.FromMap(map[string]string{
|
||||
model.AddressLabel: "1.2.3.4:80",
|
||||
}),
|
||||
cfg: &config.ScrapeConfig{
|
||||
Scheme: "http",
|
||||
MetricsPath: "/metrics",
|
||||
JobName: "job",
|
||||
ScrapeInterval: model.Duration(time.Second),
|
||||
ScrapeTimeout: model.Duration(time.Second),
|
||||
},
|
||||
noDefaultPort: true,
|
||||
res: labels.FromMap(map[string]string{
|
||||
model.AddressLabel: "1.2.3.4",
|
||||
model.InstanceLabel: "1.2.3.4:80",
|
||||
model.SchemeLabel: "http",
|
||||
model.MetricsPathLabel: "/metrics",
|
||||
model.JobLabel: "job",
|
||||
model.ScrapeIntervalLabel: "1s",
|
||||
model.ScrapeTimeoutLabel: "1s",
|
||||
}),
|
||||
resOrig: labels.FromMap(map[string]string{
|
||||
model.AddressLabel: "1.2.3.4:80",
|
||||
model.SchemeLabel: "http",
|
||||
model.MetricsPathLabel: "/metrics",
|
||||
model.JobLabel: "job",
|
||||
model.ScrapeIntervalLabel: "1s",
|
||||
model.ScrapeTimeoutLabel: "1s",
|
||||
}),
|
||||
},
|
||||
// Remove default port (https).
|
||||
{
|
||||
in: labels.FromMap(map[string]string{
|
||||
model.AddressLabel: "1.2.3.4:443",
|
||||
}),
|
||||
cfg: &config.ScrapeConfig{
|
||||
Scheme: "https",
|
||||
MetricsPath: "/metrics",
|
||||
JobName: "job",
|
||||
ScrapeInterval: model.Duration(time.Second),
|
||||
ScrapeTimeout: model.Duration(time.Second),
|
||||
},
|
||||
noDefaultPort: true,
|
||||
res: labels.FromMap(map[string]string{
|
||||
model.AddressLabel: "1.2.3.4",
|
||||
model.InstanceLabel: "1.2.3.4:443",
|
||||
model.SchemeLabel: "https",
|
||||
model.MetricsPathLabel: "/metrics",
|
||||
model.JobLabel: "job",
|
||||
model.ScrapeIntervalLabel: "1s",
|
||||
model.ScrapeTimeoutLabel: "1s",
|
||||
}),
|
||||
resOrig: labels.FromMap(map[string]string{
|
||||
model.AddressLabel: "1.2.3.4:443",
|
||||
model.SchemeLabel: "https",
|
||||
model.MetricsPathLabel: "/metrics",
|
||||
model.JobLabel: "job",
|
||||
model.ScrapeIntervalLabel: "1s",
|
||||
model.ScrapeTimeoutLabel: "1s",
|
||||
}),
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
in := c.in.Copy()
|
||||
|
||||
res, orig, err := PopulateLabels(c.in, c.cfg)
|
||||
res, orig, err := PopulateLabels(c.in, c.cfg, c.noDefaultPort)
|
||||
if c.err != "" {
|
||||
require.EqualError(t, err, c.err)
|
||||
} else {
|
||||
|
|
scrape/scrape.go
|
@ -41,6 +41,7 @@ import (
|
|||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
"github.com/prometheus/prometheus/model/exemplar"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/model/metadata"
|
||||
"github.com/prometheus/prometheus/model/relabel"
|
||||
"github.com/prometheus/prometheus/model/textparse"
|
||||
"github.com/prometheus/prometheus/model/timestamp"
|
||||
|
@ -239,6 +240,8 @@ type scrapePool struct {
|
|||
|
||||
// Constructor for new scrape loops. This is settable for testing convenience.
|
||||
newLoop func(scrapeLoopOptions) loop
|
||||
|
||||
noDefaultPort bool
|
||||
}
|
||||
|
||||
type labelLimits struct {
|
||||
|
@ -264,13 +267,13 @@ const maxAheadTime = 10 * time.Minute
|
|||
|
||||
type labelsMutator func(labels.Labels) labels.Labels
|
||||
|
||||
func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, reportExtraMetrics, passMetadataInContext bool, httpOpts []config_util.HTTPClientOption) (*scrapePool, error) {
|
||||
func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, options *Options) (*scrapePool, error) {
|
||||
targetScrapePools.Inc()
|
||||
if logger == nil {
|
||||
logger = log.NewNopLogger()
|
||||
}
|
||||
|
||||
client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, httpOpts...)
|
||||
client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, options.HTTPClientOptions...)
|
||||
if err != nil {
|
||||
targetScrapePoolsFailed.Inc()
|
||||
return nil, errors.Wrap(err, "error creating HTTP client")
|
||||
|
@ -287,7 +290,8 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
|
|||
activeTargets: map[uint64]*Target{},
|
||||
loops: map[uint64]loop{},
|
||||
logger: logger,
|
||||
httpOpts: httpOpts,
|
||||
httpOpts: options.HTTPClientOptions,
|
||||
noDefaultPort: options.NoDefaultPort,
|
||||
}
|
||||
sp.newLoop = func(opts scrapeLoopOptions) loop {
|
||||
// Update the targets retrieval function for metadata to a new scrape cache.
|
||||
|
@ -314,10 +318,10 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
|
|||
opts.labelLimits,
|
||||
opts.interval,
|
||||
opts.timeout,
|
||||
reportExtraMetrics,
|
||||
options.ExtraMetrics,
|
||||
options.EnableMetadataStorage,
|
||||
opts.target,
|
||||
cache,
|
||||
passMetadataInContext,
|
||||
options.PassMetadataInContext,
|
||||
)
|
||||
}
|
||||
|
||||
|
@ -480,7 +484,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
|
|||
var all []*Target
|
||||
sp.droppedTargets = []*Target{}
|
||||
for _, tg := range tgs {
|
||||
targets, failures := TargetsFromGroup(tg, sp.config)
|
||||
targets, failures := TargetsFromGroup(tg, sp.config, sp.noDefaultPort)
|
||||
for _, err := range failures {
|
||||
level.Error(sp.logger).Log("msg", "Creating target failed", "err", err)
|
||||
}
|
||||
|
@ -673,7 +677,7 @@ func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*re
|
|||
}
|
||||
}
|
||||
|
||||
res := lb.Labels()
|
||||
res := lb.Labels(nil)
|
||||
|
||||
if len(rc) > 0 {
|
||||
res = relabel.Process(res, rc...)
|
||||
|
@ -713,7 +717,7 @@ func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels
|
|||
lb.Set(l.Name, l.Value)
|
||||
}
|
||||
|
||||
return lb.Labels()
|
||||
return lb.Labels(nil)
|
||||
}
|
||||
|
||||
// appender returns an appender for ingested samples from the target.
|
||||
|
@ -870,7 +874,8 @@ type scrapeLoop struct {
|
|||
|
||||
disabledEndOfRunStalenessMarkers bool
|
||||
|
||||
reportExtraMetrics bool
|
||||
reportExtraMetrics bool
|
||||
appendMetadataToWAL bool
|
||||
}
|
||||
|
||||
// scrapeCache tracks mappings of exposed metric strings to label sets and
|
||||
|
@@ -903,15 +908,15 @@ type scrapeCache struct {

// metaEntry holds meta information about a metric.
type metaEntry struct {
lastIter uint64 // Last scrape iteration the entry was observed at.
typ textparse.MetricType
help string
unit string
metadata.Metadata

lastIter uint64 // Last scrape iteration the entry was observed at.
lastIterChange uint64 // Last scrape iteration the entry was changed at.
}

func (m *metaEntry) size() int {
// The attribute lastIter although part of the struct it is not metadata.
return len(m.help) + len(m.unit) + len(m.typ)
return len(m.Help) + len(m.Unit) + len(m.Type)
}
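Recording lastIterChange alongside lastIter is what later lets the scrape loop append metadata to the WAL only when it actually changed. A small illustrative sketch of the idea (the entry type and setHelp helper are hypothetical stand-ins, not the cache code itself):

package main

import "fmt"

// entry is a simplified stand-in for the cache entry above: it remembers both
// when it was last seen and when its metadata last changed.
type entry struct {
	help           string
	lastIter       uint64
	lastIterChange uint64
}

// setHelp records the entry for the current iteration and bumps
// lastIterChange only when the value actually differs.
func (e *entry) setHelp(help string, iter uint64) {
	if e.help != help {
		e.help = help
		e.lastIterChange = iter
	}
	e.lastIter = iter
}

func main() {
	e := &entry{}
	e.setHelp("Total requests.", 1)
	e.setHelp("Total requests.", 2) // unchanged: lastIterChange stays at 1
	fmt.Println(e.lastIterChange == 2) // false; nothing to rewrite this scrape
}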
|
||||
|
||||
func newScrapeCache() *scrapeCache {
|
||||
|
@ -1024,10 +1029,13 @@ func (c *scrapeCache) setType(metric []byte, t textparse.MetricType) {
|
|||
|
||||
e, ok := c.metadata[yoloString(metric)]
|
||||
if !ok {
|
||||
e = &metaEntry{typ: textparse.MetricTypeUnknown}
|
||||
e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}}
|
||||
c.metadata[string(metric)] = e
|
||||
}
|
||||
e.typ = t
|
||||
if e.Type != t {
|
||||
e.Type = t
|
||||
e.lastIterChange = c.iter
|
||||
}
|
||||
e.lastIter = c.iter
|
||||
|
||||
c.metaMtx.Unlock()
|
||||
|
@ -1038,11 +1046,12 @@ func (c *scrapeCache) setHelp(metric, help []byte) {
|
|||
|
||||
e, ok := c.metadata[yoloString(metric)]
|
||||
if !ok {
|
||||
e = &metaEntry{typ: textparse.MetricTypeUnknown}
|
||||
e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}}
|
||||
c.metadata[string(metric)] = e
|
||||
}
|
||||
if e.help != yoloString(help) {
|
||||
e.help = string(help)
|
||||
if e.Help != yoloString(help) {
|
||||
e.Help = string(help)
|
||||
e.lastIterChange = c.iter
|
||||
}
|
||||
e.lastIter = c.iter
|
||||
|
||||
|
@ -1054,11 +1063,12 @@ func (c *scrapeCache) setUnit(metric, unit []byte) {
|
|||
|
||||
e, ok := c.metadata[yoloString(metric)]
|
||||
if !ok {
|
||||
e = &metaEntry{typ: textparse.MetricTypeUnknown}
|
||||
e = &metaEntry{Metadata: metadata.Metadata{Type: textparse.MetricTypeUnknown}}
|
||||
c.metadata[string(metric)] = e
|
||||
}
|
||||
if e.unit != yoloString(unit) {
|
||||
e.unit = string(unit)
|
||||
if e.Unit != yoloString(unit) {
|
||||
e.Unit = string(unit)
|
||||
e.lastIterChange = c.iter
|
||||
}
|
||||
e.lastIter = c.iter
|
||||
|
||||
|
@ -1075,9 +1085,9 @@ func (c *scrapeCache) GetMetadata(metric string) (MetricMetadata, bool) {
|
|||
}
|
||||
return MetricMetadata{
|
||||
Metric: metric,
|
||||
Type: m.typ,
|
||||
Help: m.help,
|
||||
Unit: m.unit,
|
||||
Type: m.Type,
|
||||
Help: m.Help,
|
||||
Unit: m.Unit,
|
||||
}, true
|
||||
}
|
||||
|
||||
|
@ -1090,9 +1100,9 @@ func (c *scrapeCache) ListMetadata() []MetricMetadata {
|
|||
for m, e := range c.metadata {
|
||||
res = append(res, MetricMetadata{
|
||||
Metric: m,
|
||||
Type: e.typ,
|
||||
Help: e.help,
|
||||
Unit: e.unit,
|
||||
Type: e.Type,
|
||||
Help: e.Help,
|
||||
Unit: e.Unit,
|
||||
})
|
||||
}
|
||||
return res
|
||||
|
@ -1132,8 +1142,8 @@ func newScrapeLoop(ctx context.Context,
|
|||
interval time.Duration,
|
||||
timeout time.Duration,
|
||||
reportExtraMetrics bool,
|
||||
appendMetadataToWAL bool,
|
||||
target *Target,
|
||||
metricMetadataStore MetricMetadataStore,
|
||||
passMetadataInContext bool,
|
||||
) *scrapeLoop {
|
||||
if l == nil {
|
||||
|
@ -1175,6 +1185,7 @@ func newScrapeLoop(ctx context.Context,
|
|||
interval: interval,
|
||||
timeout: timeout,
|
||||
reportExtraMetrics: reportExtraMetrics,
|
||||
appendMetadataToWAL: appendMetadataToWAL,
|
||||
}
|
||||
sl.ctx, sl.cancel = context.WithCancel(ctx)
|
||||
|
||||
|
@@ -1454,12 +1465,36 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
}

var (
defTime = timestamp.FromTime(ts)
appErrs = appendErrors{}
sampleLimitErr error
e exemplar.Exemplar // escapes to heap so hoisted out of loop
defTime = timestamp.FromTime(ts)
appErrs = appendErrors{}
sampleLimitErr error
e exemplar.Exemplar // escapes to heap so hoisted out of loop
meta metadata.Metadata
metadataChanged bool
)

// updateMetadata updates the current iteration's metadata object and the
// metadataChanged value if we have metadata in the scrape cache AND the
// labelset is for a new series or the metadata for this series has just
// changed. It returns a boolean based on whether the metadata was updated.
updateMetadata := func(lset labels.Labels, isNewSeries bool) bool {
if !sl.appendMetadataToWAL {
return false
}

sl.cache.metaMtx.Lock()
defer sl.cache.metaMtx.Unlock()
metaEntry, metaOk := sl.cache.metadata[yoloString([]byte(lset.Get(labels.MetricName)))]
if metaOk && (isNewSeries || metaEntry.lastIterChange == sl.cache.iter) {
metadataChanged = true
meta.Type = metaEntry.Type
meta.Unit = metaEntry.Unit
meta.Help = metaEntry.Help
return true
}
return false
}

// Take an appender with limits.
app = appender(app, sl.sampleLimit)
|
||||
|
||||
|
@ -1509,6 +1544,10 @@ loop:
|
|||
t = *tp
|
||||
}
|
||||
|
||||
// Zero metadata out for current iteration until it's resolved.
|
||||
meta = metadata.Metadata{}
|
||||
metadataChanged = false
|
||||
|
||||
if sl.cache.getDropped(yoloString(met)) {
|
||||
continue
|
||||
}
|
||||
|
@ -1523,6 +1562,9 @@ loop:
|
|||
if ok {
|
||||
ref = ce.ref
|
||||
lset = ce.lset
|
||||
|
||||
// Update metadata only if it changed in the current iteration.
|
||||
updateMetadata(lset, false)
|
||||
} else {
|
||||
mets = p.Metric(&lset)
|
||||
hash = lset.Hash()
|
||||
|
@ -1547,6 +1589,9 @@ loop:
|
|||
targetScrapePoolExceededLabelLimits.Inc()
|
||||
break loop
|
||||
}
|
||||
|
||||
// Append metadata for new series if they were present.
|
||||
updateMetadata(lset, true)
|
||||
}
|
||||
|
||||
ref, err = app.Append(ref, lset, t, v)
|
||||
|
@ -1586,6 +1631,12 @@ loop:
|
|||
e = exemplar.Exemplar{} // reset for next time round loop
|
||||
}
|
||||
|
||||
if sl.appendMetadataToWAL && metadataChanged {
|
||||
if _, merr := app.UpdateMetadata(ref, lset, meta); merr != nil {
|
||||
// No need to fail the scrape on errors appending metadata.
|
||||
level.Debug(sl.l).Log("msg", "Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", meta), "err", merr)
|
||||
}
|
||||
}
|
||||
}
|
||||
if sampleLimitErr != nil {
|
||||
if err == nil {
|
||||
|
|
|
@ -56,7 +56,7 @@ func TestNewScrapePool(t *testing.T) {
|
|||
var (
|
||||
app = &nopAppendable{}
|
||||
cfg = &config.ScrapeConfig{}
|
||||
sp, _ = newScrapePool(cfg, app, 0, nil, false, false, nil)
|
||||
sp, _ = newScrapePool(cfg, app, 0, nil, &Options{})
|
||||
)
|
||||
|
||||
if a, ok := sp.appendable.(*nopAppendable); !ok || a != app {
|
||||
|
@ -91,7 +91,7 @@ func TestDroppedTargetsList(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
sp, _ = newScrapePool(cfg, app, 0, nil, false, false, nil)
|
||||
sp, _ = newScrapePool(cfg, app, 0, nil, &Options{})
|
||||
expectedLabelSetString = "{__address__=\"127.0.0.1:9090\", __scrape_interval__=\"0s\", __scrape_timeout__=\"0s\", job=\"dropMe\"}"
|
||||
expectedLength = 1
|
||||
)
|
||||
|
@ -116,22 +116,12 @@ func TestDiscoveredLabelsUpdate(t *testing.T) {
|
|||
}
|
||||
sp.activeTargets = make(map[uint64]*Target)
|
||||
t1 := &Target{
|
||||
discoveredLabels: labels.Labels{
|
||||
labels.Label{
|
||||
Name: "label",
|
||||
Value: "name",
|
||||
},
|
||||
},
|
||||
discoveredLabels: labels.FromStrings("label", "name"),
|
||||
}
|
||||
sp.activeTargets[t1.hash()] = t1
|
||||
|
||||
t2 := &Target{
|
||||
discoveredLabels: labels.Labels{
|
||||
labels.Label{
|
||||
Name: "labelNew",
|
||||
Value: "nameNew",
|
||||
},
|
||||
},
|
||||
discoveredLabels: labels.FromStrings("labelNew", "nameNew"),
|
||||
}
|
||||
sp.sync([]*Target{t2})
|
||||
|
||||
|
@ -488,7 +478,7 @@ func TestScrapePoolTargetLimit(t *testing.T) {
|
|||
func TestScrapePoolAppender(t *testing.T) {
|
||||
cfg := &config.ScrapeConfig{}
|
||||
app := &nopAppendable{}
|
||||
sp, _ := newScrapePool(cfg, app, 0, nil, false, false, nil)
|
||||
sp, _ := newScrapePool(cfg, app, 0, nil, &Options{})
|
||||
|
||||
loop := sp.newLoop(scrapeLoopOptions{
|
||||
target: &Target{},
|
||||
|
@ -530,7 +520,7 @@ func TestScrapePoolRaces(t *testing.T) {
|
|||
newConfig := func() *config.ScrapeConfig {
|
||||
return &config.ScrapeConfig{ScrapeInterval: interval, ScrapeTimeout: timeout}
|
||||
}
|
||||
sp, _ := newScrapePool(newConfig(), &nopAppendable{}, 0, nil, false, false, nil)
|
||||
sp, _ := newScrapePool(newConfig(), &nopAppendable{}, 0, nil, &Options{})
|
||||
tgts := []*targetgroup.Group{
|
||||
{
|
||||
Targets: []model.LabelSet{
|
||||
|
@ -624,7 +614,7 @@ func TestScrapeLoopStopBeforeRun(t *testing.T) {
|
|||
1,
|
||||
0,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -696,7 +686,7 @@ func TestScrapeLoopStop(t *testing.T) {
|
|||
10*time.Millisecond,
|
||||
time.Hour,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -771,7 +761,7 @@ func TestScrapeLoopRun(t *testing.T) {
|
|||
time.Second,
|
||||
time.Hour,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -826,7 +816,7 @@ func TestScrapeLoopRun(t *testing.T) {
|
|||
time.Second,
|
||||
100*time.Millisecond,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -885,7 +875,7 @@ func TestScrapeLoopForcedErr(t *testing.T) {
|
|||
time.Second,
|
||||
time.Hour,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -943,7 +933,7 @@ func TestScrapeLoopMetadata(t *testing.T) {
|
|||
0,
|
||||
0,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -1000,7 +990,7 @@ func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) {
|
|||
0,
|
||||
0,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -1093,7 +1083,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
|
|||
10*time.Millisecond,
|
||||
time.Hour,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -1155,7 +1145,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
|
|||
10*time.Millisecond,
|
||||
time.Hour,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -1221,7 +1211,7 @@ func TestScrapeLoopCache(t *testing.T) {
|
|||
10*time.Millisecond,
|
||||
time.Hour,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -1303,7 +1293,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
|
|||
10*time.Millisecond,
|
||||
time.Hour,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -1417,7 +1407,7 @@ func TestScrapeLoopAppend(t *testing.T) {
|
|||
0,
|
||||
0,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -1507,7 +1497,7 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
|
|||
return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil)
|
||||
},
|
||||
nil,
|
||||
func(ctx context.Context) storage.Appender { return app }, nil, 0, true, 0, nil, 0, 0, false, nil, nil, false,
|
||||
func(ctx context.Context) storage.Appender { return app }, nil, 0, true, 0, nil, 0, 0, false, false, nil, false,
|
||||
)
|
||||
slApp := sl.appender(context.Background())
|
||||
_, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC))
|
||||
|
@ -1543,7 +1533,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
|
|||
0,
|
||||
0,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -1601,7 +1591,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
|
|||
0,
|
||||
0,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -1678,7 +1668,7 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) {
|
|||
0,
|
||||
0,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -1726,7 +1716,7 @@ func TestScrapeLoopAppendStaleness(t *testing.T) {
|
|||
0,
|
||||
0,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -1777,7 +1767,7 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
|
|||
0,
|
||||
0,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -1888,7 +1878,7 @@ metric_total{n="2"} 2 # {t="2"} 2.0 20000
|
|||
0,
|
||||
0,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -1953,7 +1943,7 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) {
|
|||
0,
|
||||
0,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -2005,7 +1995,7 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) {
|
|||
10*time.Millisecond,
|
||||
time.Hour,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -2041,7 +2031,7 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
|
|||
10*time.Millisecond,
|
||||
time.Hour,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -2090,7 +2080,7 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T
|
|||
0,
|
||||
0,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -2135,7 +2125,7 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) {
|
|||
0,
|
||||
0,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -2392,7 +2382,7 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) {
|
|||
0,
|
||||
0,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -2433,7 +2423,7 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
|
|||
0,
|
||||
0,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -2473,7 +2463,7 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
|
|||
0,
|
||||
0,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -2517,7 +2507,7 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) {
|
|||
nil, nil,
|
||||
func(l labels.Labels) labels.Labels {
|
||||
if l.Has("drop") {
|
||||
return labels.Labels{}
|
||||
return labels.FromStrings("no", "name") // This label set will trigger an error.
|
||||
}
|
||||
return l
|
||||
},
|
||||
|
@ -2531,7 +2521,7 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) {
|
|||
0,
|
||||
0,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -2626,22 +2616,9 @@ func TestReuseScrapeCache(t *testing.T) {
|
|||
ScrapeInterval: model.Duration(5 * time.Second),
|
||||
MetricsPath: "/metrics",
|
||||
}
|
||||
sp, _ = newScrapePool(cfg, app, 0, nil, false, false, nil)
|
||||
sp, _ = newScrapePool(cfg, app, 0, nil, &Options{})
|
||||
t1 = &Target{
|
||||
discoveredLabels: labels.Labels{
|
||||
labels.Label{
|
||||
Name: "labelNew",
|
||||
Value: "nameNew",
|
||||
},
|
||||
labels.Label{
|
||||
Name: "labelNew1",
|
||||
Value: "nameNew1",
|
||||
},
|
||||
labels.Label{
|
||||
Name: "labelNew2",
|
||||
Value: "nameNew2",
|
||||
},
|
||||
},
|
||||
discoveredLabels: labels.FromStrings("labelNew", "nameNew", "labelNew1", "nameNew1", "labelNew2", "nameNew2"),
|
||||
}
|
||||
proxyURL, _ = url.Parse("http://localhost:2128")
|
||||
)
|
||||
|
@ -2807,7 +2784,7 @@ func TestScrapeAddFast(t *testing.T) {
|
|||
0,
|
||||
0,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -2839,14 +2816,9 @@ func TestReuseCacheRace(t *testing.T) {
|
|||
ScrapeInterval: model.Duration(5 * time.Second),
|
||||
MetricsPath: "/metrics",
|
||||
}
|
||||
sp, _ = newScrapePool(cfg, app, 0, nil, false, false, nil)
|
||||
sp, _ = newScrapePool(cfg, app, 0, nil, &Options{})
|
||||
t1 = &Target{
|
||||
discoveredLabels: labels.Labels{
|
||||
labels.Label{
|
||||
Name: "labelNew",
|
||||
Value: "nameNew",
|
||||
},
|
||||
},
|
||||
discoveredLabels: labels.FromStrings("labelNew", "nameNew"),
|
||||
}
|
||||
)
|
||||
defer sp.stop()
|
||||
|
@ -2898,7 +2870,7 @@ func TestScrapeReportSingleAppender(t *testing.T) {
|
|||
10*time.Millisecond,
|
||||
time.Hour,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -2971,7 +2943,7 @@ func TestScrapeReportLimit(t *testing.T) {
|
|||
}))
|
||||
defer ts.Close()
|
||||
|
||||
sp, err := newScrapePool(cfg, s, 0, nil, false, false, nil)
|
||||
sp, err := newScrapePool(cfg, s, 0, nil, &Options{})
|
||||
require.NoError(t, err)
|
||||
defer sp.stop()
|
||||
|
||||
|
@ -3100,7 +3072,7 @@ func TestScrapeLoopLabelLimit(t *testing.T) {
|
|||
0,
|
||||
0,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
nil,
|
||||
false,
|
||||
)
|
||||
|
@ -3141,7 +3113,7 @@ func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
sp, _ := newScrapePool(config, &nopAppendable{}, 0, nil, false, false, nil)
|
||||
sp, _ := newScrapePool(config, &nopAppendable{}, 0, nil, &Options{})
|
||||
tgts := []*targetgroup.Group{
|
||||
{
|
||||
Targets: []model.LabelSet{{model.AddressLabel: "127.0.0.1:9090"}},
|
||||
|
|
|
@ -351,7 +351,7 @@ func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels,
|
|||
// PopulateLabels builds a label set from the given label set and scrape configuration.
|
||||
// It returns a label set before relabeling was applied as the second return value.
|
||||
// Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling.
|
||||
func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) {
|
||||
func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig, noDefaultPort bool) (res, orig labels.Labels, err error) {
|
||||
// Copy labels into the labelset for the target if they are not set already.
|
||||
scrapeLabels := []labels.Label{
|
||||
{Name: model.JobLabel, Value: cfg.JobName},
|
||||
|
@ -374,7 +374,7 @@ func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig lab
|
|||
}
|
||||
}
|
||||
|
||||
preRelabelLabels := lb.Labels()
|
||||
preRelabelLabels := lb.Labels(nil)
|
||||
lset = relabel.Process(preRelabelLabels, cfg.RelabelConfigs...)
|
||||
|
||||
// Check if the target was dropped.
|
||||
|
@@ -389,21 +389,25 @@ func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig lab

// addPort checks whether we should add a default port to the address.
// If the address is not valid, we don't append a port either.
addPort := func(s string) bool {
addPort := func(s string) (string, string, bool) {
// If we can split, a port exists and we don't have to add one.
if _, _, err := net.SplitHostPort(s); err == nil {
return false
if host, port, err := net.SplitHostPort(s); err == nil {
return host, port, false
}
// If adding a port makes it valid, the previous error
// was not due to an invalid address and we can append a port.
_, _, err := net.SplitHostPort(s + ":1234")
return err == nil
return "", "", err == nil
}

addr := lset.Get(model.AddressLabel)
// If it's an address with no trailing port, infer it based on the used scheme.
if addPort(addr) {
scheme := lset.Get(model.SchemeLabel)
host, port, add := addPort(addr)
// If it's an address with no trailing port, infer it based on the used scheme
// unless the no-default-scrape-port feature flag is present.
if !noDefaultPort && add {
// Addresses reaching this point are already wrapped in [] if necessary.
switch lset.Get(model.SchemeLabel) {
switch scheme {
case "http", "":
addr = addr + ":80"
case "https":

@@ -414,6 +418,21 @@ func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig lab
lb.Set(model.AddressLabel, addr)
}

if noDefaultPort {
// If it's an address with a trailing default port and the
// no-default-scrape-port flag is present, remove the port.
switch port {
case "80":
if scheme == "http" {
lb.Set(model.AddressLabel, host)
}
case "443":
if scheme == "https" {
lb.Set(model.AddressLabel, host)
}
}
}
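Taken together, the two branches above add a default port when the flag is off and strip a redundant default port when it is on. A rough standalone sketch of that behaviour (normalizeAddr is an illustrative helper, not the real PopulateLabels; it ignores the IPv6 bracket handling done by the actual code):

package main

import (
	"fmt"
	"net"
)

// normalizeAddr sketches the behaviour above: without the flag a bare host
// gains the scheme's default port; with the flag a default port is stripped.
func normalizeAddr(addr, scheme string, noDefaultPort bool) string {
	host, port, err := net.SplitHostPort(addr)
	if err != nil {
		// No port present; add one unless the feature flag says not to.
		if noDefaultPort {
			return addr
		}
		if scheme == "https" {
			return net.JoinHostPort(addr, "443")
		}
		return net.JoinHostPort(addr, "80")
	}
	// A port is present; strip it only if it is the scheme's default.
	if noDefaultPort && ((scheme == "http" && port == "80") || (scheme == "https" && port == "443")) {
		return host
	}
	return addr
}

func main() {
	fmt.Println(normalizeAddr("1.2.3.4", "http", false))     // 1.2.3.4:80
	fmt.Println(normalizeAddr("1.2.3.4:443", "https", true)) // 1.2.3.4
	fmt.Println(normalizeAddr("1.2.3.4:9100", "http", true)) // 1.2.3.4:9100
}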
|
||||
|
||||
if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
@ -453,7 +472,7 @@ func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig lab
|
|||
lb.Set(model.InstanceLabel, addr)
|
||||
}
|
||||
|
||||
res = lb.Labels()
|
||||
res = lb.Labels(nil)
|
||||
for _, l := range res {
|
||||
// Check label values are valid, drop the target if not.
|
||||
if !model.LabelValue(l.Value).IsValid() {
|
||||
|
@ -464,7 +483,7 @@ func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig lab
|
|||
}
|
||||
|
||||
// TargetsFromGroup builds targets based on the given TargetGroup and config.
|
||||
func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig) ([]*Target, []error) {
|
||||
func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefaultPort bool) ([]*Target, []error) {
|
||||
targets := make([]*Target, 0, len(tg.Targets))
|
||||
failures := []error{}
|
||||
|
||||
|
@ -482,7 +501,7 @@ func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig) ([]*Targe
|
|||
|
||||
lset := labels.New(lbls...)
|
||||
|
||||
lbls, origLabels, err := PopulateLabels(lset, cfg)
|
||||
lbls, origLabels, err := PopulateLabels(lset, cfg, noDefaultPort)
|
||||
if err != nil {
|
||||
failures = append(failures, errors.Wrapf(err, "instance %d in group %s", i, tg))
|
||||
}
|
||||
|
|
|
@ -129,7 +129,7 @@ func newTestTarget(targetURL string, deadline time.Duration, lbls labels.Labels)
|
|||
lb.Set(model.AddressLabel, strings.TrimPrefix(targetURL, "http://"))
|
||||
lb.Set(model.MetricsPathLabel, "/metrics")
|
||||
|
||||
return &Target{labels: lb.Labels()}
|
||||
return &Target{labels: lb.Labels(nil)}
|
||||
}
|
||||
|
||||
func TestNewHTTPBearerToken(t *testing.T) {
|
||||
|
@ -375,7 +375,7 @@ func TestTargetsFromGroup(t *testing.T) {
|
|||
ScrapeTimeout: model.Duration(10 * time.Second),
|
||||
ScrapeInterval: model.Duration(1 * time.Minute),
|
||||
}
|
||||
targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &cfg)
|
||||
targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &cfg, false)
|
||||
if len(targets) != 1 {
|
||||
t.Fatalf("Expected 1 target, got %v", len(targets))
|
||||
}
|
||||
|
|
|
@@ -6,6 +6,11 @@ set -euo pipefail

cd web/ui
cp embed.go.tmpl embed.go

GZIP_OPTS="-fk"
# gzip option '-k' may not always exist in the latest gzip available on different distros.
if ! gzip -k -h &>/dev/null; then GZIP_OPTS="-f"; fi

find static -type f -name '*.gz' -delete
find static -type f -exec gzip -fk '{}' \; -print0 | xargs -0 -I % echo %.gz | xargs echo //go:embed >> embed.go
find static -type f -exec gzip $GZIP_OPTS '{}' \; -print0 | xargs -0 -I % echo %.gz | xargs echo //go:embed >> embed.go
echo var EmbedFS embed.FS >> embed.go