Mirror of https://github.com/prometheus/prometheus.git (synced 2024-12-25 13:44:05 -08:00)

commit a8cdb7eb6c
Merge branch 'main' into template_reference

Signed-off-by: Julien Pivotto <roidelapluie@o11y.eu>
.circleci/config.yml (modified)
@@ -1,141 +1,33 @@
 ---
+# Prometheus has switched to GitHub action.
+# Circle CI is not disabled repository-wise so that previous pull requests
+# continue working.
+# This file does not generate any CircleCI workflow.
+
 version: 2.1
-
-orbs:
-  prometheus: prometheus/prometheus@0.4.0
-  go: circleci/go@0.2.0
-  win: circleci/windows@2.3.0
 
 executors:
-  # Whenever the Go version is updated here, .promu.yml
-  # should also be updated.
   golang:
     docker:
-      - image: circleci/golang:1.13-node
-
-  fuzzit:
-    docker:
-      - image: fuzzitdev/golang:1.12.7-buster
+      - image: busybox
 
 jobs:
-  test:
+  noopjob:
     executor: golang
 
     steps:
-      - prometheus/setup_environment
-      - go/load-cache:
-          key: v1
-      - restore_cache:
-          keys:
-            - v1-npm-deps-{{ checksum "web/ui/react-app/yarn.lock" }}
-            - v1-npm-deps-
-      - run:
-          command: make
-          environment:
-            # Run garbage collection more aggressively to avoid getting OOMed during the lint phase.
-            GOGC: "20"
-            # By default Go uses GOMAXPROCS but a Circle CI executor has many
-            # cores (> 30) while the CPU and RAM resources are throttled. If we
-            # don't limit this to the number of allocated cores, the job is
-            # likely to get OOMed and killed.
-            GOOPTS: "-p 2"
-            GOMAXPROCS: "2"
-      - prometheus/check_proto
-      - prometheus/store_artifact:
-          file: prometheus
-      - prometheus/store_artifact:
-          file: promtool
-      - go/save-cache:
-          key: v1
-      - save_cache:
-          key: v1-npm-deps-{{ checksum "web/ui/react-app/yarn.lock" }}
-          paths:
-            - web/ui/react-app/node_modules
-  test_windows:
-    executor: win/default
-    working_directory: /go/src/github.com/prometheus/prometheus
-    steps:
-      - checkout
-      - run:
-          shell: bash
-          command: |
-            (cd web/ui && GOOS= GOARCH= go generate -mod=vendor)
-            go test -mod=vendor -test.v `go list ./...|grep -Exv "(github.com/prometheus/prometheus/discovery.*|github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"`
-          environment:
-            GOGC: "20"
-            GOOPTS: "-p 2 -mod=vendor"
-  fuzzit_regression:
-    executor: fuzzit
-    working_directory: /go/src/github.com/prometheus/prometheus
-    steps:
-      - checkout
-      - setup_remote_docker
-      - run: ./fuzzit.sh local-regression
-  fuzzit_fuzzing:
-    executor: fuzzit
-    working_directory: /go/src/github.com/prometheus/prometheus
-    steps:
-      - checkout
-      - setup_remote_docker
-      - run: ./fuzzit.sh fuzzing
-
-  makefile_sync:
-    executor: golang
-    steps:
-      - checkout
-      - run: ./scripts/sync_makefiles.sh
+      - run:
+          command: "true"
 
 workflows:
   version: 2
   prometheus:
     jobs:
-      - test:
-          filters:
-            tags:
-              only: /.*/
-      - test_windows:
-          filters:
-            tags:
-              only: /.*/
-      - fuzzit_regression:
-          filters:
-            tags:
-              only: /.*/
-      - prometheus/build:
-          name: build
-          filters:
-            tags:
-              only: /.*/
-      - prometheus/publish_master:
-          context: org-context
-          requires:
-            - test
-            - build
-          filters:
-            branches:
-              only: master
-          image: circleci/golang:1-node
-      - prometheus/publish_release:
-          context: org-context
-          requires:
-            - test
-            - build
-          filters:
-            tags:
-              only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
-            branches:
-              ignore: /.*/
-          image: circleci/golang:1-node
-  nightly:
+      - noopjob
     triggers:
       - schedule:
-          cron: "0 0 * * *"
+          cron: "0 0 30 2 *"
           filters:
             branches:
               only:
-                - master
-    jobs:
-      - makefile_sync:
-          context: org-context
-      - fuzzit_fuzzing:
-          context: org-context
+                - main
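For reference, the new-side lines above assemble into this minimal no-op CircleCI configuration (a reconstruction from the added lines of the diff; indentation is inferred):

    ---
    # Prometheus has switched to GitHub action.
    # Circle CI is not disabled repository-wise so that previous pull requests
    # continue working.
    # This file does not generate any CircleCI workflow.

    version: 2.1

    executors:
      golang:
        docker:
          - image: busybox

    jobs:
      noopjob:
        executor: golang

        steps:
          - run:
              command: "true"

    workflows:
      version: 2
      prometheus:
        jobs:
          - noopjob
        triggers:
          - schedule:
              cron: "0 0 30 2 *"  # Feb 30 never occurs, so this never fires.
              filters:
                branches:
                  only:
                    - main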
.dockerignore (modified)
@@ -5,4 +5,5 @@ data/
 !.build/linux-amd64/
 !.build/linux-armv7/
 !.build/linux-arm64/
+!.build/linux-ppc64le/
 !.build/linux-s390x/
.github/CODEOWNERS | 8 (new file)
@@ -0,0 +1,8 @@
+/web/ui @juliusv
+/web/ui/module @juliusv @nexucis
+/storage/remote @csmarchbanks @cstyan @bwplotka @tomwilkie
+/storage/remote/otlptranslator @gouthamve @jesusvazquez
+/discovery/kubernetes @brancz
+/tsdb @jesusvazquez
+/promql @roidelapluie
+/cmd/promtool @dgl
.github/ISSUE_TEMPLATE/bug_report.md | 56 (deleted)
@@ -1,56 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve.
-title: ''
-labels: kind/bug
-assignees: ''
----
-
-<!--
-
-	Please do *NOT* ask usage questions in Github issues.
-
-	If your issue is not a feature request or bug report use:
-	https://groups.google.com/forum/#!forum/prometheus-users. If
-	you are unsure whether you hit a bug, search and ask in the
-	mailing list first.
-
-	You can find more information at: https://prometheus.io/community/
-
--->
-
-**What did you do?**
-
-**What did you expect to see?**
-
-**What did you see instead? Under which circumstances?**
-
-**Environment**
-
-* System information:
-
-	insert output of `uname -srm` here
-
-* Prometheus version:
-
-	insert output of `prometheus --version` here
-
-* Alertmanager version:
-
-	insert output of `alertmanager --version` here (if relevant to the issue)
-
-* Prometheus configuration file:
-```
-insert configuration here
-```
-
-* Alertmanager configuration file:
-```
-insert configuration here (if relevant to the issue)
-```
-
-
-* Logs:
-```
-insert Prometheus and Alertmanager logs relevant to the issue here
-```
.github/ISSUE_TEMPLATE/bug_report.yml | 74 (new file)
@@ -0,0 +1,74 @@
+---
+name: Bug report
+description: Create a report to help us improve.
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Thank you for opening a bug report for Prometheus.
+
+        Please do *NOT* ask support questions in Github issues.
+
+        If your issue is not a feature request or bug report use our [community support](https://prometheus.io/community/).
+
+        There is also [commercial support](https://prometheus.io/support-training/) available.
+  - type: textarea
+    attributes:
+      label: What did you do?
+      description: Please provide steps for us to reproduce this issue.
+    validations:
+      required: true
+  - type: textarea
+    attributes:
+      label: What did you expect to see?
+  - type: textarea
+    attributes:
+      label: What did you see instead? Under which circumstances?
+    validations:
+      required: true
+  - type: markdown
+    attributes:
+      value: |
+        ## Environment
+  - type: input
+    attributes:
+      label: System information
+      description: insert output of `uname -srm` here, or operating system version
+      placeholder: e.g. Linux 5.16.15 x86_64
+  - type: textarea
+    attributes:
+      label: Prometheus version
+      description: Insert output of `prometheus --version` here.
+      render: text
+      placeholder: |
+        e.g. prometheus, version 2.23.0 (branch: HEAD, revision: 26d89b4b0776fe4cd5a3656dfa520f119a375273)
+          build user: root@37609b3a0a21
+          build date: 20201126-10:56:17
+          go version: go1.15.5
+          platform: linux/amd64
+  - type: textarea
+    attributes:
+      label: Prometheus configuration file
+      description: Insert relevant configuration here. Don't forget to remove secrets.
+      render: yaml
+  - type: textarea
+    attributes:
+      label: Alertmanager version
+      description: Insert output of `alertmanager --version` here (if relevant to the issue).
+      render: text
+      placeholder: |
+        e.g. alertmanager, version 0.22.2 (branch: HEAD, revision: 44f8adc06af5101ad64bd8b9c8b18273f2922051)
+          build user: root@b595c7f32520
+          build date: 20210602-07:50:37
+          go version: go1.16.4
+          platform: linux/amd64
+  - type: textarea
+    attributes:
+      label: Alertmanager configuration file
+      description: Insert relevant configuration here. Don't forget to remove secrets.
+      render: yaml
+  - type: textarea
+    attributes:
+      label: Logs
+      description: Insert Prometheus and Alertmanager logs relevant to the issue here.
+      render: text
.github/ISSUE_TEMPLATE/config.yml | 10 (modified)
@@ -1,8 +1,8 @@
 blank_issues_enabled: false
 contact_links:
-  - name: Users mailing list
-    url: https://groups.google.com/forum/#!forum/prometheus-users
-    about: Please ask and answer usage questions here.
-  - name: Prometheus community
+  - name: Prometheus Community Support
     url: https://prometheus.io/community/
-    about: List of communication channels for the Prometheus community.
+    about: If you need help or support, please request help here.
+  - name: Commercial Support & Training
+    url: https://prometheus.io/support-training/
+    about: If you want commercial support or training, vendors are listed here.
.github/ISSUE_TEMPLATE/feature_request.md | 24 (deleted)
@@ -1,24 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for this project.
-title: ''
-labels: ''
-assignees: ''
----
-
-<!--
-
-	Please do *NOT* ask usage questions in Github issues.
-
-	If your issue is not a feature request or bug report use:
-	https://groups.google.com/forum/#!forum/prometheus-users. If
-	you are unsure whether you hit a bug, search and ask in the
-	mailing list first.
-
-	You can find more information at: https://prometheus.io/community/
-
--->
-## Proposal
-**Use case. Why is this important?**
-
-*“Nice to have” is not a good use case. :)*
.github/ISSUE_TEMPLATE/feature_request.yml | 23 (new file)
@@ -0,0 +1,23 @@
+---
+name: Feature request
+description: Suggest an idea for this project.
+body:
+  - type: markdown
+    attributes:
+      value: >-
+        Please do *NOT* ask support questions in Github issues.
+
+
+        If your issue is not a feature request or bug report use
+        our [community support](https://prometheus.io/community/).
+
+
+        There is also [commercial
+        support](https://prometheus.io/support-training/) available.
+  - type: textarea
+    attributes:
+      label: Proposal
+      description: Use case. Why is this important?
+      placeholder: “Nice to have” is not a good use case. :)
+    validations:
+      required: true
.github/PULL_REQUEST_TEMPLATE.md | 16 (modified)
@@ -1,15 +1,17 @@
 <!--
 Don't forget!
 
+- Please sign CNCF's Developer Certificate of Origin and sign-off your commits by adding the -s / --signoff flag to `git commit`. See https://github.com/apps/dco for more information.
+
 - If the PR adds or changes a behaviour or fixes a bug of an exported API it would need a unit/e2e test.
 
 - Where possible use only exported APIs for tests to simplify the review and make it as close as possible to an actual library usage.
 
 - No tests are needed for internal implementation changes.
 
 - Performance improvements would need a benchmark test to prove it.
 
 - All exposed objects should have a comment.
 
 - All comments should start with a capital letter and end with a full stop.
 -->
 
.github/dependabot.yml | 33 (new file)
@@ -0,0 +1,33 @@
+version: 2
+updates:
+  - package-ecosystem: "gomod"
+    directory: "/"
+    schedule:
+      interval: "monthly"
+    groups:
+      k8s.io:
+        patterns:
+          - "k8s.io/*"
+      go.opentelemetry.io:
+        patterns:
+          - "go.opentelemetry.io/*"
+  - package-ecosystem: "gomod"
+    directory: "/documentation/examples/remote_storage"
+    schedule:
+      interval: "monthly"
+  - package-ecosystem: "npm"
+    directory: "/web/ui"
+    schedule:
+      interval: "monthly"
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "monthly"
+  - package-ecosystem: "github-actions"
+    directory: "/scripts"
+    schedule:
+      interval: "monthly"
+  - package-ecosystem: "docker"
+    directory: "/"
+    schedule:
+      interval: "monthly"
.github/lock.yml | 35 (deleted)
@@ -1,35 +0,0 @@
-# Configuration for Lock Threads - https://github.com/dessant/lock-threads
-
-# Number of days of inactivity before a closed issue or pull request is locked
-daysUntilLock: 180
-
-# Skip issues and pull requests created before a given timestamp. Timestamp must
-# follow ISO 8601 (`YYYY-MM-DD`). Set to `false` to disable
-skipCreatedBefore: false
-
-# Issues and pull requests with these labels will be ignored. Set to `[]` to disable
-exemptLabels: []
-
-# Label to add before locking, such as `outdated`. Set to `false` to disable
-lockLabel: false
-
-# Comment to post before locking. Set to `false` to disable
-lockComment: false
-
-# Assign `resolved` as the reason for locking. Set to `false` to disable
-setLockReason: false
-
-# Limit to only `issues` or `pulls`
-only: issues
-
-# Optionally, specify configuration settings just for `issues` or `pulls`
-# issues:
-#   exemptLabels:
-#     - help-wanted
-#   lockLabel: outdated
-
-# pulls:
-#   daysUntilLock: 30
-
-# Repository to extend settings from
-# _extends: repo
.github/stale.yml | 56 (new file)
@@ -0,0 +1,56 @@
+# Configuration for probot-stale - https://github.com/probot/stale
+
+# Number of days of inactivity before an Issue or Pull Request becomes stale
+daysUntilStale: 60
+
+# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
+# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
+daysUntilClose: false
+
+# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)
+onlyLabels: []
+
+# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
+exemptLabels:
+  - keepalive
+
+# Set to true to ignore issues in a project (defaults to false)
+exemptProjects: false
+
+# Set to true to ignore issues in a milestone (defaults to false)
+exemptMilestones: false
+
+# Set to true to ignore issues with an assignee (defaults to false)
+exemptAssignees: false
+
+# Label to use when marking as stale
+staleLabel: stale
+
+# Comment to post when marking as stale. Set to `false` to disable
+markComment: false
+
+# Comment to post when removing the stale label.
+# unmarkComment: >
+#   Your comment here.
+
+# Comment to post when closing a stale Issue or Pull Request.
+# closeComment: >
+#   Your comment here.
+
+# Limit the number of actions per hour, from 1-30. Default is 30
+limitPerRun: 30
+
+# Limit to only `issues` or `pulls`
+only: pulls
+
+# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
+# pulls:
+#   daysUntilStale: 30
+#   markComment: >
+#     This pull request has been automatically marked as stale because it has not had
+#     recent activity. It will be closed if no further activity occurs. Thank you
+#     for your contributions.
+
+# issues:
+#   exemptLabels:
+#     - confirmed
.github/workflows/buf-lint.yml | 25 (new file)
@@ -0,0 +1,25 @@
+name: buf.build
+on:
+  pull_request:
+    paths:
+      - ".github/workflows/buf-lint.yml"
+      - "**.proto"
+permissions:
+  contents: read
+
+jobs:
+  buf:
+    name: lint
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
+      - uses: bufbuild/buf-setup-action@eb60cd0de4f14f1f57cf346916b8cd69a9e7ed0b # v1.26.1
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+      - uses: bufbuild/buf-lint-action@bd48f53224baaaf0fc55de9a913e7680ca6dbea4 # v1.0.3
+        with:
+          input: 'prompb'
+      - uses: bufbuild/buf-breaking-action@f47418c81c00bfd65394628385593542f64db477 # v1.1.2
+        with:
+          input: 'prompb'
+          against: 'https://github.com/prometheus/prometheus.git#branch=main,ref=HEAD,subdir=prompb'
.github/workflows/buf.yml | 29 (new file)
@@ -0,0 +1,29 @@
+name: buf.build
+on:
+  push:
+    branches:
+      - main
+permissions:
+  contents: read
+
+jobs:
+  buf:
+    name: lint and publish
+    runs-on: ubuntu-latest
+    if: github.repository_owner == 'prometheus'
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
+      - uses: bufbuild/buf-setup-action@eb60cd0de4f14f1f57cf346916b8cd69a9e7ed0b # v1.26.1
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+      - uses: bufbuild/buf-lint-action@bd48f53224baaaf0fc55de9a913e7680ca6dbea4 # v1.0.3
+        with:
+          input: 'prompb'
+      - uses: bufbuild/buf-breaking-action@f47418c81c00bfd65394628385593542f64db477 # v1.1.2
+        with:
+          input: 'prompb'
+          against: 'https://github.com/prometheus/prometheus.git#branch=main,ref=HEAD~1,subdir=prompb'
+      - uses: bufbuild/buf-push-action@342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1 # v1.1.1
+        with:
+          input: 'prompb'
+          buf_token: ${{ secrets.BUF_TOKEN }}
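The two buf workflows wrap the buf CLI, so the same checks can be run locally; a rough shell equivalent (assuming a locally installed buf, using the same inputs as the workflows):

    # Lint the protobuf sources in prompb/.
    buf lint prompb

    # Flag breaking changes against the previous commit on main, as buf.yml does.
    buf breaking prompb \
      --against 'https://github.com/prometheus/prometheus.git#branch=main,ref=HEAD~1,subdir=prompb'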
.github/workflows/ci.yml | 224 (new file)
@@ -0,0 +1,224 @@
+---
+name: CI
+on:
+  pull_request:
+  push:
+
+jobs:
+  test_go:
+    name: Go tests
+    runs-on: ubuntu-latest
+    # Whenever the Go version is updated here, .promu.yml
+    # should also be updated.
+    container:
+      image: quay.io/prometheus/golang-builder:1.21-base
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
+      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: ./.github/promci/actions/setup_environment
+      - run: make GO_ONLY=1 SKIP_GOLANGCI_LINT=1
+      - run: go test ./tsdb/ -test.tsdb-isolation=false
+      - run: go test --tags=stringlabels ./...
+      - run: GOARCH=386 go test ./cmd/prometheus
+      - run: make -C documentation/examples/remote_storage
+      - run: make -C documentation/examples
+      - uses: ./.github/promci/actions/check_proto
+        with:
+          version: "3.15.8"
+
+  test_ui:
+    name: UI tests
+    runs-on: ubuntu-latest
+    # Whenever the Go version is updated here, .promu.yml
+    # should also be updated.
+    container:
+      image: quay.io/prometheus/golang-builder:1.21-base
+
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
+      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: ./.github/promci/actions/setup_environment
+        with:
+          enable_go: false
+          enable_npm: true
+      - run: make assets-tarball
+      - run: make ui-lint
+      - run: make ui-test
+      - uses: ./.github/promci/actions/save_artifacts
+        with:
+          directory: .tarballs
+
+  test_windows:
+    name: Go tests on Windows
+    runs-on: windows-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
+      - uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0
+        with:
+          go-version: '>=1.21 <1.22'
+      - run: |
+          $TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/discovery.*|github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"}
+          go test $TestTargets -vet=off -v
+        shell: powershell
+
+  test_golang_oldest:
+    name: Go tests with previous Go version
+    runs-on: ubuntu-latest
+    # The go verson in this image should be N-1 wrt test_go.
+    container:
+      image: quay.io/prometheus/golang-builder:1.20-base
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
+      - run: make build
+      - run: go test ./tsdb/...
+      - run: go test ./tsdb/ -test.tsdb-isolation=false
+
+  test_mixins:
+    name: Mixins tests
+    runs-on: ubuntu-latest
+    # Whenever the Go version is updated here, .promu.yml
+    # should also be updated.
+    container:
+      image: quay.io/prometheus/golang-builder:1.20-base
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
+      - run: go install ./cmd/promtool/.
+      - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest
+      - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest
+      - run: go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest
+      - run: make -C documentation/prometheus-mixin clean
+      - run: make -C documentation/prometheus-mixin jb_install
+      - run: make -C documentation/prometheus-mixin
+      - run: git diff --exit-code
+
+  build:
+    name: Build Prometheus for common architectures
+    runs-on: ubuntu-latest
+    if: |
+      !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+      &&
+      !(github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-'))
+      &&
+      !(github.event_name == 'push' && github.event.ref == 'refs/heads/main')
+    strategy:
+      matrix:
+        thread: [ 0, 1, 2 ]
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
+      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: ./.github/promci/actions/build
+        with:
+          promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"
+          parallelism: 3
+          thread: ${{ matrix.thread }}
+  build_all:
+    name: Build Prometheus for all architectures
+    runs-on: ubuntu-latest
+    if: |
+      (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+      ||
+      (github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-'))
+      ||
+      (github.event_name == 'push' && github.event.ref == 'refs/heads/main')
+    strategy:
+      matrix:
+        thread: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 ]
+
+    # Whenever the Go version is updated here, .promu.yml
+    # should also be updated.
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
+      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: ./.github/promci/actions/build
+        with:
+          parallelism: 12
+          thread: ${{ matrix.thread }}
+  golangci:
+    name: golangci-lint
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
+      - name: Install Go
+        uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0
+        with:
+          go-version: 1.20.x
+      - name: Install snmp_exporter/generator dependencies
+        run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
+        if: github.repository == 'prometheus/snmp_exporter'
+      - name: Lint
+        uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0
+        with:
+          args: --verbose
+          version: v1.54.2
+  fuzzing:
+    uses: ./.github/workflows/fuzzing.yml
+    if: github.event_name == 'pull_request'
+  codeql:
+    uses: ./.github/workflows/codeql-analysis.yml
+
+  publish_main:
+    name: Publish main branch artifacts
+    runs-on: ubuntu-latest
+    needs: [test_ui, test_go, test_windows, golangci, codeql, build_all]
+    if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
+      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: ./.github/promci/actions/publish_main
+        with:
+          docker_hub_login: ${{ secrets.docker_hub_login }}
+          docker_hub_password: ${{ secrets.docker_hub_password }}
+          quay_io_login: ${{ secrets.quay_io_login }}
+          quay_io_password: ${{ secrets.quay_io_password }}
+  publish_release:
+    name: Publish release artefacts
+    runs-on: ubuntu-latest
+    needs: [test_ui, test_go, test_windows, golangci, codeql, build_all]
+    if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
+      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: ./.github/promci/actions/publish_release
+        with:
+          docker_hub_login: ${{ secrets.docker_hub_login }}
+          docker_hub_password: ${{ secrets.docker_hub_password }}
+          quay_io_login: ${{ secrets.quay_io_login }}
+          quay_io_password: ${{ secrets.quay_io_password }}
+          github_token: ${{ secrets.PROMBOT_GITHUB_TOKEN }}
+  publish_ui_release:
+    name: Publish UI on npm Registry
+    runs-on: ubuntu-latest
+    needs: [test_ui, codeql]
+    steps:
+      - name: Checkout
+        uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
+      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - name: Install nodejs
+        uses: actions/setup-node@5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d # v3.8.1
+        with:
+          node-version-file: "web/ui/.nvmrc"
+          registry-url: "https://registry.npmjs.org"
+      - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
+        with:
+          path: ~/.npm
+          key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
+          restore-keys: |
+            ${{ runner.os }}-node-
+      - name: Check libraries version
+        if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
+        run: ./scripts/ui_release.sh --check-package "$(echo ${{ github.ref_name }}|sed s/v2/v0/)"
+      - name: build
+        run: make assets
+      - name: Copy files before publishing libs
+        run: ./scripts/ui_release.sh --copy
+      - name: Publish dry-run libraries
+        if: "!(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))"
+        run: ./scripts/ui_release.sh --publish dry-run
+      - name: Publish libraries
+        if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
+        run: ./scripts/ui_release.sh --publish
+        env:
+          # The setup-node action writes an .npmrc file with this env variable
+          # as the placeholder for the auth token
+          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
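Because the test_go steps are plain make and go invocations, the job can be approximated locally; a sketch (assuming a checkout and a Go toolchain matching the 1.21 builder image, rather than running inside the container):

    make GO_ONLY=1 SKIP_GOLANGCI_LINT=1         # Go build and tests, skipping lint
    go test ./tsdb/ -test.tsdb-isolation=false  # TSDB tests with isolation disabled
    go test --tags=stringlabels ./...           # re-run tests with the stringlabels build tag
    GOARCH=386 go test ./cmd/prometheus         # 32-bit smoke test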
.github/workflows/codeql-analysis.yml | 41 (new file)
@@ -0,0 +1,41 @@
+---
+name: "CodeQL"
+
+on:
+  workflow_call:
+  schedule:
+    - cron: "26 14 * * 1"
+
+permissions:
+  contents: read
+  security-events: write
+
+jobs:
+  analyze:
+    name: Analyze
+    runs-on: ubuntu-latest
+    permissions:
+      security-events: write
+
+    strategy:
+      fail-fast: false
+      matrix:
+        language: ["go", "javascript"]
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
+      - uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0
+        with:
+          go-version: '>=1.21 <1.22'
+
+      - name: Initialize CodeQL
+        uses: github/codeql-action/init@00e563ead9f72a8461b24876bee2d0c2e8bd2ee8 # v2.21.5
+        with:
+          languages: ${{ matrix.language }}
+
+      - name: Autobuild
+        uses: github/codeql-action/autobuild@00e563ead9f72a8461b24876bee2d0c2e8bd2ee8 # v2.21.5
+
+      - name: Perform CodeQL Analysis
+        uses: github/codeql-action/analyze@00e563ead9f72a8461b24876bee2d0c2e8bd2ee8 # v2.21.5
.github/workflows/funcbench.yml | 61 (new file)
@@ -0,0 +1,61 @@
+on:
+  repository_dispatch:
+    types: [funcbench_start]
+name: Funcbench Workflow
+permissions:
+  contents: read
+
+jobs:
+  run_funcbench:
+    name: Running funcbench
+    if: github.event.action == 'funcbench_start'
+    runs-on: ubuntu-latest
+    env:
+      AUTH_FILE: ${{ secrets.TEST_INFRA_PROVIDER_AUTH }}
+      BRANCH: ${{ github.event.client_payload.BRANCH }}
+      BENCH_FUNC_REGEX: ${{ github.event.client_payload.BENCH_FUNC_REGEX }}
+      PACKAGE_PATH: ${{ github.event.client_payload.PACKAGE_PATH }}
+      GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}
+      GITHUB_ORG: prometheus
+      GITHUB_REPO: prometheus
+      GITHUB_STATUS_TARGET_URL: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}
+      LAST_COMMIT_SHA: ${{ github.event.client_payload.LAST_COMMIT_SHA }}
+      GKE_PROJECT_ID: macro-mile-203600
+      PR_NUMBER: ${{ github.event.client_payload.PR_NUMBER }}
+      PROVIDER: gke
+      ZONE: europe-west3-a
+    steps:
+      - name: Update status to pending
+        run: >-
+          curl -i -X POST
+          -H "Authorization: Bearer $GITHUB_TOKEN"
+          -H "Content-Type: application/json"
+          --data '{"state":"pending","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}'
+          "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
+      - name: Prepare nodepool
+        uses: docker://prominfra/funcbench:master
+        with:
+          entrypoint: "docker_entrypoint"
+          args: make deploy
+      - name: Delete all resources
+        if: always()
+        uses: docker://prominfra/funcbench:master
+        with:
+          entrypoint: "docker_entrypoint"
+          args: make clean
+      - name: Update status to failure
+        if: failure()
+        run: >-
+          curl -i -X POST
+          -H "Authorization: Bearer $GITHUB_TOKEN"
+          -H "Content-Type: application/json"
+          --data '{"state":"failure","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}'
+          "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
+      - name: Update status to success
+        if: success()
+        run: >-
+          curl -i -X POST
+          -H "Authorization: Bearer $GITHUB_TOKEN"
+          -H "Content-Type: application/json"
+          --data '{"state":"success","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}'
+          "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
.github/workflows/fuzzing.yml | 28 (new file)
@@ -0,0 +1,28 @@
+name: CIFuzz
+on:
+  workflow_call:
+permissions:
+  contents: read
+
+jobs:
+  Fuzzing:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Build Fuzzers
+        id: build
+        uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master
+        with:
+          oss-fuzz-project-name: "prometheus"
+          dry-run: false
+      - name: Run Fuzzers
+        uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master
+        with:
+          oss-fuzz-project-name: "prometheus"
+          fuzz-seconds: 600
+          dry-run: false
+      - name: Upload Crash
+        uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
+        if: failure() && steps.build.outcome == 'success'
+        with:
+          name: artifacts
+          path: ./out/artifacts
.github/workflows/lock.yml | 23 (new file)
@@ -0,0 +1,23 @@
+name: 'Lock Threads'
+
+on:
+  schedule:
+    - cron: '13 23 * * *'
+  workflow_dispatch:
+
+permissions:
+  issues: write
+
+concurrency:
+  group: lock
+
+jobs:
+  action:
+    runs-on: ubuntu-latest
+    if: github.repository_owner == 'prometheus'
+    steps:
+      - uses: dessant/lock-threads@be8aa5be94131386884a6da4189effda9b14aa21 # v4.0.1
+        with:
+          process-only: 'issues'
+          issue-inactive-days: '180'
+          github-token: ${{ secrets.PROMBOT_LOCKTHREADS_TOKEN }}
.github/workflows/prombench.yml | 264 (modified)
@@ -1,172 +1,126 @@
-on: repository_dispatch
+on:
+  repository_dispatch:
+    types: [prombench_start, prombench_restart, prombench_stop]
 name: Prombench Workflow
+env:
+  AUTH_FILE: ${{ secrets.TEST_INFRA_PROVIDER_AUTH }}
+  CLUSTER_NAME: test-infra
+  DOMAIN_NAME: prombench.prometheus.io
+  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+  GITHUB_ORG: prometheus
+  GITHUB_REPO: prometheus
+  GITHUB_STATUS_TARGET_URL: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}
+  LAST_COMMIT_SHA: ${{ github.event.client_payload.LAST_COMMIT_SHA }}
+  GKE_PROJECT_ID: macro-mile-203600
+  PR_NUMBER: ${{ github.event.client_payload.PR_NUMBER }}
+  PROVIDER: gke
+  RELEASE: ${{ github.event.client_payload.RELEASE }}
+  ZONE: europe-west3-a
 jobs:
   benchmark_start:
     name: Benchmark Start
    if: github.event.action == 'prombench_start'
     runs-on: ubuntu-latest
     steps:
       - name: Update status to pending
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          LAST_COMMIT_SHA: ${{ github.event.client_payload.LAST_COMMIT_SHA }}
         run: >-
           curl -i -X POST
           -H "Authorization: Bearer $GITHUB_TOKEN"
           -H "Content-Type: application/json"
-          --data '{"state":"pending", "context": "prombench-status-update-start", "target_url": "https://github.com/'$GITHUB_REPOSITORY'/actions"}'
+          --data '{"state":"pending", "context": "prombench-status-update-start", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
           "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
       - name: Run make deploy to start test
         id: make_deploy
-        uses: docker://prombench/prombench:2.0.2
-        env:
-          AUTH_FILE: ${{ secrets.PROMBENCH_GKE_AUTH }}
-          PROJECT_ID: macro-mile-203600
-          CLUSTER_NAME: prombench
-          ZONE: europe-west3-a
-          DOMAIN_NAME: prombench.prometheus.io
-          TEST_INFRA_REPO: https://github.com/prometheus/prombench.git
-          GITHUB_ORG: prometheus
-          GITHUB_REPO: prometheus
-          PR_NUMBER: ${{ github.event.client_payload.PR_NUMBER }}
-          RELEASE: ${{ github.event.client_payload.RELEASE }}
+        uses: docker://prominfra/prombench:master
         with:
           args: >-
-            until make all_nodepools_deleted; do echo "waiting for nodepools to be deleted"; sleep 10; done;
+            until make all_nodes_deleted; do echo "waiting for nodepools to be deleted"; sleep 10; done;
             make deploy;
       - name: Update status to failure
         if: failure()
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          LAST_COMMIT_SHA: ${{ github.event.client_payload.LAST_COMMIT_SHA }}
         run: >-
           curl -i -X POST
           -H "Authorization: Bearer $GITHUB_TOKEN"
           -H "Content-Type: application/json"
-          --data '{"state":"failure", "context": "prombench-status-update-start", "target_url": "https://github.com/'$GITHUB_REPOSITORY'/actions"}'
+          --data '{"state":"failure", "context": "prombench-status-update-start", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
           "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
       - name: Update status to success
         if: success()
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          LAST_COMMIT_SHA: ${{ github.event.client_payload.LAST_COMMIT_SHA }}
         run: >-
           curl -i -X POST
           -H "Authorization: Bearer $GITHUB_TOKEN"
           -H "Content-Type: application/json"
-          --data '{"state":"success", "context": "prombench-status-update-start", "target_url": "https://github.com/'$GITHUB_REPOSITORY'/actions"}'
+          --data '{"state":"success", "context": "prombench-status-update-start", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
           "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
-  #############################
-  # Jobs for stopping benchmark
-  #############################
   benchmark_cancel:
     name: Benchmark Cancel
     if: github.event.action == 'prombench_stop'
     runs-on: ubuntu-latest
     steps:
       - name: Update status to pending
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          LAST_COMMIT_SHA: ${{ github.event.client_payload.LAST_COMMIT_SHA }}
         run: >-
           curl -i -X POST
           -H "Authorization: Bearer $GITHUB_TOKEN"
           -H "Content-Type: application/json"
-          --data '{"state":"pending", "context": "prombench-status-update-cancel", "target_url": "https://github.com/'$GITHUB_REPOSITORY'/actions"}'
+          --data '{"state":"pending", "context": "prombench-status-update-cancel", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
          "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
       - name: Run make clean to stop test
         id: make_clean
-        uses: docker://prombench/prombench:2.0.2
-        env:
-          AUTH_FILE: ${{ secrets.PROMBENCH_GKE_AUTH }}
-          PROJECT_ID: macro-mile-203600
-          CLUSTER_NAME: prombench
-          ZONE: europe-west3-a
-          TEST_INFRA_REPO: https://github.com/prometheus/prombench.git
-          PR_NUMBER: ${{ github.event.client_payload.PR_NUMBER }}
+        uses: docker://prominfra/prombench:master
        with:
           args: >-
-            until make all_nodepools_running; do echo "waiting for nodepools to be created"; sleep 10; done;
+            until make all_nodes_running; do echo "waiting for nodepools to be created"; sleep 10; done;
             make clean;
       - name: Update status to failure
         if: failure()
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          LAST_COMMIT_SHA: ${{ github.event.client_payload.LAST_COMMIT_SHA }}
         run: >-
           curl -i -X POST
           -H "Authorization: Bearer $GITHUB_TOKEN"
           -H "Content-Type: application/json"
-          --data '{"state":"failure", "context": "prombench-status-update-cancel", "target_url": "https://github.com/'$GITHUB_REPOSITORY'/actions"}'
+          --data '{"state":"failure", "context": "prombench-status-update-cancel", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
           "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
       - name: Update status to success
         if: success()
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          LAST_COMMIT_SHA: ${{ github.event.client_payload.LAST_COMMIT_SHA }}
         run: >-
           curl -i -X POST
           -H "Authorization: Bearer $GITHUB_TOKEN"
           -H "Content-Type: application/json"
-          --data '{"state":"success", "context": "prombench-status-update-cancel", "target_url": "https://github.com/'$GITHUB_REPOSITORY'/actions"}'
+          --data '{"state":"success", "context": "prombench-status-update-cancel", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
           "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
-  ###############################
-  # Jobs for restarting benchmark
-  ###############################
   benchmark_restart:
     name: Benchmark Restart
     if: github.event.action == 'prombench_restart'
     runs-on: ubuntu-latest
     steps:
       - name: Update status to pending
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          LAST_COMMIT_SHA: ${{ github.event.client_payload.LAST_COMMIT_SHA }}
         run: >-
           curl -i -X POST
           -H "Authorization: Bearer $GITHUB_TOKEN"
           -H "Content-Type: application/json"
-          --data '{"state":"pending", "context": "prombench-status-update-restart", "target_url": "https://github.com/'$GITHUB_REPOSITORY'/actions"}'
+          --data '{"state":"pending", "context": "prombench-status-update-restart", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
           "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
       - name: Run make clean then make deploy to restart test
         id: make_restart
-        uses: docker://prombench/prombench:2.0.2
-        env:
-          AUTH_FILE: ${{ secrets.PROMBENCH_GKE_AUTH }}
-          PROJECT_ID: macro-mile-203600
-          CLUSTER_NAME: prombench
-          ZONE: europe-west3-a
-          DOMAIN_NAME: prombench.prometheus.io
-          TEST_INFRA_REPO: https://github.com/prometheus/prombench.git
-          GITHUB_ORG: prometheus
-          GITHUB_REPO: prometheus
-          PR_NUMBER: ${{ github.event.client_payload.PR_NUMBER }}
-          RELEASE: ${{ github.event.client_payload.RELEASE }}
+        uses: docker://prominfra/prombench:master
         with:
           args: >-
-            until make all_nodepools_running; do echo "waiting for nodepools to be created"; sleep 10; done;
+            until make all_nodes_running; do echo "waiting for nodepools to be created"; sleep 10; done;
             make clean;
-            until make all_nodepools_deleted; do echo "waiting for nodepools to be deleted"; sleep 10; done;
+            until make all_nodes_deleted; do echo "waiting for nodepools to be deleted"; sleep 10; done;
             make deploy;
       - name: Update status to failure
         if: failure()
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          LAST_COMMIT_SHA: ${{ github.event.client_payload.LAST_COMMIT_SHA }}
         run: >-
           curl -i -X POST
           -H "Authorization: Bearer $GITHUB_TOKEN"
           -H "Content-Type: application/json"
-          --data '{"state":"failure", "context": "prombench-status-update-restart", "target_url": "https://github.com/'$GITHUB_REPOSITORY'/actions"}'
+          --data '{"state":"failure", "context": "prombench-status-update-restart", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
           "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
       - name: Update status to success
         if: success()
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          LAST_COMMIT_SHA: ${{ github.event.client_payload.LAST_COMMIT_SHA }}
         run: >-
           curl -i -X POST
           -H "Authorization: Bearer $GITHUB_TOKEN"
           -H "Content-Type: application/json"
-          --data '{"state":"success", "context": "prombench-status-update-restart", "target_url": "https://github.com/'$GITHUB_REPOSITORY'/actions"}'
+          --data '{"state":"success", "context": "prombench-status-update-restart", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}'
           "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
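All three jobs key off repository_dispatch event types, so a benchmark run can be triggered through the GitHub API. A hypothetical manual trigger (the PR number and payload values here are illustrative; the keys mirror the client_payload fields the workflow reads):

    curl -X POST \
      -H "Authorization: Bearer $GITHUB_TOKEN" \
      -H "Accept: application/vnd.github+json" \
      https://api.github.com/repos/prometheus/prometheus/dispatches \
      -d '{"event_type":"prombench_start","client_payload":{"PR_NUMBER":"1234","RELEASE":"v2.47.0","LAST_COMMIT_SHA":"<sha>"}}'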
.github/workflows/repo_sync.yml | 19 (new file)
@@ -0,0 +1,19 @@
+---
+name: Sync repo files
+on:
+  schedule:
+    - cron: '44 17 * * *'
+permissions:
+  contents: read
+
+jobs:
+  repo_sync:
+    runs-on: ubuntu-latest
+    if: github.repository_owner == 'prometheus'
+    container:
+      image: quay.io/prometheus/golang-builder
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
+      - run: ./scripts/sync_repo_files.sh
+        env:
+          GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}
.gitignore | 16 (modified)
@@ -6,18 +6,28 @@
 
 /prometheus
 /promtool
-/tsdb/tsdb
 benchmark.txt
 /data
+/data-agent
 /cmd/prometheus/data
+/cmd/prometheus/data-agent
 /cmd/prometheus/debug
+/benchout
+/cmd/promtool/data
 
 !/.travis.yml
 !/.promu.yml
 !/.golangci.yml
 /documentation/examples/remote_storage/remote_storage_adapter/remote_storage_adapter
-/documentation/examples/remote_storage/example_write_adapter/example_writer_adapter
+/documentation/examples/remote_storage/example_write_adapter/example_write_adapter
 
 npm_licenses.tar.bz2
 /web/ui/static/react
-/web/ui/assets_vfsdata.go
+/vendor
+/.build
+
+/**/node_modules
+
+# Ignore parser debug
+y.output
+
.gitpod.Dockerfile | 15 (new file)
@@ -0,0 +1,15 @@
+FROM gitpod/workspace-full
+
+ENV CUSTOM_NODE_VERSION=16
+ENV CUSTOM_GO_VERSION=1.19
+ENV GOPATH=$HOME/go-packages
+ENV GOROOT=$HOME/go
+ENV PATH=$GOROOT/bin:$GOPATH/bin:$PATH
+
+RUN bash -c ". .nvm/nvm.sh && nvm install ${CUSTOM_NODE_VERSION} && nvm use ${CUSTOM_NODE_VERSION} && nvm alias default ${CUSTOM_NODE_VERSION}"
+
+RUN echo "nvm use default &>/dev/null" >> ~/.bashrc.d/51-nvm-fix
+RUN curl -fsSL https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz | tar xzs \
+    && printf '%s\n' 'export GOPATH=/workspace/go' \
+                     'export PATH=$GOPATH/bin:$PATH' > $HOME/.bashrc.d/300-go
+
.gitpod.yml | 20 (new file)
@@ -0,0 +1,20 @@
+image:
+  file: .gitpod.Dockerfile
+tasks:
+  - init:
+      make build
+    command: |
+      gp sync-done build
+      ./prometheus --config.file=documentation/examples/prometheus.yml
+  - command: |
+      cd web/ui/
+      gp sync-await build
+      unset BROWSER
+      export DANGEROUSLY_DISABLE_HOST_CHECK=true
+      npm start
+    openMode: split-right
+ports:
+  - port: 3000
+    onOpen: open-preview
+  - port: 9090
+    onOpen: ignore
|
.golangci.yml
@@ -1,13 +1,72 @@
 run:
-  modules-download-mode: vendor
-  deadline: 5m
+  timeout: 15m
+  skip-files:
+    # Skip autogenerated files.
+    - ^.*\.(pb|y)\.go$
+  skip-dirs:
+    # Copied it from a different source
+    - storage/remote/otlptranslator/prometheusremotewrite
+
+output:
+  sort-results: true
+
+linters:
+  enable:
+    - depguard
+    - gocritic
+    - gofumpt
+    - goimports
+    - misspell
+    - predeclared
+    - revive
+    - unconvert
+    - unused
 
 issues:
+  max-same-issues: 0
   exclude-rules:
+    - linters:
+        - gocritic
+      text: "appendAssign"
     - path: _test.go
       linters:
         - errcheck
 
 linters-settings:
+  depguard:
+    rules:
+      main:
+        deny:
+          - pkg: "sync/atomic"
+            desc: "Use go.uber.org/atomic instead of sync/atomic"
+          - pkg: "github.com/stretchr/testify/assert"
+            desc: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert"
+          - pkg: "github.com/go-kit/kit/log"
+            desc: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
+          - pkg: "io/ioutil"
+            desc: "Use corresponding 'os' or 'io' functions instead."
+          - pkg: "regexp"
+            desc: "Use github.com/grafana/regexp instead of regexp"
   errcheck:
-    exclude: scripts/errcheck_excludes.txt
+    exclude-functions:
+      # Don't flag lines such as "io.Copy(io.Discard, resp.Body)".
+      - io.Copy
+      # The next two are used in HTTP handlers, any error is handled by the server itself.
+      - io.WriteString
+      - (net/http.ResponseWriter).Write
+      # No need to check for errors on server's shutdown.
+      - (*net/http.Server).Shutdown
+      # Never check for logger errors.
+      - (github.com/go-kit/log.Logger).Log
+      # Never check for rollback errors as Rollback() is called when a previous error was detected.
+      - (github.com/prometheus/prometheus/storage.Appender).Rollback
+  goimports:
+    local-prefixes: github.com/prometheus/prometheus
+  gofumpt:
+    extra-rules: true
+  revive:
+    rules:
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter
+      - name: unused-parameter
+        severity: warning
+        disabled: true
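For reference, a minimal sketch of reproducing this lint run locally. The pinned version matches the `GOLANGCI_LINT_VERSION` bump in Makefile.common further down in this diff; the install-script URL follows golangci-lint's documented bootstrap and is an assumption here, since Makefile.common only shows the tail of that command:

```bash
# Install the pinned linter into GOPATH/bin (install URL assumed from
# golangci-lint's documented install script; version from Makefile.common).
curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh \
  | sh -s -- -b "$(go env GOPATH)/bin" v1.54.2

# golangci-lint picks up .golangci.yml from the repository root automatically;
# --timeout mirrors the GOLANGCI_LINT_OPTS default added to the Makefile.
golangci-lint run --timeout 4m ./...
```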
.promu.yml (46 lines changed)
@@ -1,7 +1,7 @@
 go:
     # Whenever the Go version is updated here,
     # .circle/config.yml should also be updated.
-    version: 1.13
+    version: 1.21
 repository:
     path: github.com/prometheus/prometheus
 build:
@@ -10,9 +10,15 @@ build:
           path: ./cmd/prometheus
         - name: promtool
           path: ./cmd/promtool
-        - name: tsdb
-          path: ./tsdb/cmd/tsdb
-    flags: -mod=vendor -a -tags netgo,builtinassets
+    tags:
+        all:
+            - netgo
+            - builtinassets
+            - stringlabels
+        windows:
+            - builtinassets
+            - stringlabels
+    flags: -a
     ldflags: |
         -X github.com/prometheus/common/version.Version={{.Version}}
         -X github.com/prometheus/common/version.Revision={{.Revision}}
@@ -20,6 +26,8 @@ build:
         -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}}
         -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}}
 tarball:
+    # Whenever there are new files to include in the tarball,
+    # remember to make sure the new files will be generated after `make build`.
     files:
         - consoles
         - console_libraries
@@ -29,26 +37,10 @@ tarball:
         - npm_licenses.tar.bz2
 crossbuild:
     platforms:
-        - linux/amd64
-        - linux/386
-        - darwin/amd64
-        - darwin/386
-        - windows/amd64
-        - windows/386
-        - freebsd/amd64
-        - freebsd/386
-        - openbsd/amd64
-        - openbsd/386
-        - netbsd/amd64
-        - netbsd/386
-        - dragonfly/amd64
-        - linux/arm
-        - linux/arm64
-        - freebsd/arm
-        - openbsd/arm
-        - linux/mips64
-        - linux/mips64le
-        - netbsd/arm
-        - linux/ppc64
-        - linux/ppc64le
-        - linux/s390x
+        - darwin
+        - dragonfly
+        - freebsd
+        - illumos
+        - linux
+        - netbsd
+        - windows
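A short sketch of how this `.promu.yml` is consumed. `make promu` and `promu crossbuild -p linux/amd64` appear in the README changes later in this diff, and `promu build --prefix` is the invocation used by Makefile.common; treat this as an illustrative flow, not a prescribed one:

```bash
make promu                       # download the pinned promu release into $GOPATH/bin
promu build --prefix .           # build prometheus and promtool with the tags/ldflags above
promu crossbuild -p linux/amd64  # cross-compile one platform from the crossbuild list
```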
.yamllint (new file, 23 lines)
@@ -0,0 +1,23 @@
+---
+extends: default
+
+rules:
+  braces:
+    max-spaces-inside: 1
+    level: error
+  brackets:
+    max-spaces-inside: 1
+    level: error
+  commas: disable
+  comments: disable
+  comments-indentation: disable
+  document-start: disable
+  indentation:
+    spaces: consistent
+    indent-sequences: consistent
+  key-duplicates:
+    ignore: |
+      config/testdata/section_key_dup.bad.yml
+  line-length: disable
+  truthy:
+    check-keys: false
CHANGELOG.md (985 lines changed): diff suppressed because it is too large.
CODE_OF_CONDUCT.md (new file, 3 lines)
@@ -0,0 +1,3 @@
+# Prometheus Community Code of Conduct
+
+Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
CONTRIBUTING.md
@@ -19,8 +19,7 @@ Prometheus uses GitHub to manage reviews of pull requests.
   Practices for Production
   Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style).
 
-* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works)
-
+* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works).
 
 ## Steps to Contribute
 
@@ -28,10 +27,13 @@ Should you wish to work on an issue, please claim it first by commenting on the
 
 Please check the [`low-hanging-fruit`](https://github.com/prometheus/prometheus/issues?q=is%3Aissue+is%3Aopen+label%3A%22low+hanging+fruit%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the tag, please comment on them and one of the maintainers will clarify it. For a quicker response, contact us over [IRC](https://prometheus.io/community).
 
+You can [spin up a prebuilt dev environment](https://gitpod.io/#https://github.com/prometheus/prometheus) using Gitpod.io.
+
 For complete instructions on how to compile see: [Building From Source](https://github.com/prometheus/prometheus#building-from-source)
 
 For quickly compiling and testing your changes do:
-```
+
+```bash
 # For building.
 go build ./cmd/prometheus/
 ./prometheus
@@ -46,38 +48,50 @@ All our issues are regularly tagged so that you can also filter down the issues
 
 ## Pull Request Checklist
 
-* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes.
+* Branch from the main branch and, if needed, rebase to the current main branch before submitting your pull request. If it doesn't merge cleanly with main you may be asked to rebase your changes.
 
 * Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests).
 
-* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)).
+* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on the IRC channel [#prometheus-dev](https://web.libera.chat/?channels=#prometheus-dev) on irc.libera.chat (for the easiest start, [join via Element](https://app.element.io/#/room/#prometheus-dev:matrix.org)).
 
 * Add tests relevant to the fixed bug or new feature.
 
 ## Dependency management
 
-The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.13 or greater installed.
-
-All dependencies are vendored in the `vendor/` directory.
+The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages.
 
 To add or update a new dependency, use the `go get` command:
 
 ```bash
 # Pick the latest tagged release.
-go get example.com/some/module/pkg
+go get example.com/some/module/pkg@latest
 
 # Pick a specific version.
 go get example.com/some/module/pkg@vX.Y.Z
 ```
 
-Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory:
+Tidy up the `go.mod` and `go.sum` files:
 
 ```bash
 # The GO111MODULE variable can be omitted when the code isn't located in GOPATH.
 GO111MODULE=on go mod tidy
-
-GO111MODULE=on go mod vendor
 ```
 
-You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request.
+You have to commit the changes to `go.mod` and `go.sum` before submitting the pull request.
+
+## Working with the PromQL parser
+
+The PromQL parser grammar is located in `promql/parser/generated_parser.y` and it can be built using `make parser`.
+The parser is built using [goyacc](https://pkg.go.dev/golang.org/x/tools/cmd/goyacc).
+
+If doing some sort of debugging, then it is possible to add some verbose output. After generating the parser, you
+can modify the `./promql/parser/generated_parser.y.go` manually.
+
+```golang
+// As of writing this was somewhere around line 600.
+var (
+	yyDebug        = 0     // This can be a number 0 -> 5.
+	yyErrorVerbose = false // This can be set to true.
+)
+```
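To make the new PromQL parser section concrete, the grammar round-trip looks like this; both commands are taken from the `parser` target added to the Makefile later in this diff:

```bash
# Install the goyacc version pinned by the Makefile's parser target.
go install golang.org/x/tools/cmd/goyacc@v0.6.0

# Regenerate the parser; this runs:
# goyacc -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y
make parser
# Afterwards, yyDebug / yyErrorVerbose can be flipped in the generated
# file for verbose parser output while debugging.
```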
Dockerfile
@@ -14,14 +14,13 @@ COPY LICENSE /LICENSE
 COPY NOTICE /NOTICE
 COPY npm_licenses.tar.bz2 /npm_licenses.tar.bz2
 
-RUN ln -s /usr/share/prometheus/console_libraries /usr/share/prometheus/consoles/ /etc/prometheus/
-RUN mkdir -p /prometheus && \
-    chown -R nobody:nogroup etc/prometheus /prometheus
+WORKDIR /prometheus
+RUN ln -s /usr/share/prometheus/console_libraries /usr/share/prometheus/consoles/ /etc/prometheus/ && \
+    chown -R nobody:nobody /etc/prometheus /prometheus
 
 USER       nobody
 EXPOSE     9090
 VOLUME     [ "/prometheus" ]
-WORKDIR    /prometheus
 ENTRYPOINT [ "/bin/prometheus" ]
 CMD        [ "--config.file=/etc/prometheus/prometheus.yml", \
              "--storage.tsdb.path=/prometheus", \
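A sketch of running an image built from this Dockerfile, matching its EXPOSE, VOLUME, and CMD declarations. The config path is the default that CMD points at; the bind-mount source and the volume name are illustrative:

```bash
# Bind-mount a config at the path the image's CMD expects, and keep TSDB data
# in a named volume matching the VOLUME declaration. Paths and names illustrative.
docker run -d --name prometheus \
  -p 127.0.0.1:9090:9090 \
  -v "$(pwd)/prometheus.yml:/etc/prometheus/prometheus.yml" \
  -v prometheus-data:/prometheus \
  prom/prometheus
```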
MAINTAINERS.md
@@ -1,21 +1,24 @@
-@brian-brazil is the main/default maintainer, some parts of the codebase have other maintainers:
+# Maintainers
+
+Julien Pivotto (<roidelapluie@prometheus.io> / @roidelapluie) and Levi Harrison (<levi@leviharrison.dev> / @LeviHarrison) are the main/default maintainers, some parts of the codebase have other maintainers:
 
 * `cmd`
-  * `promtool`: @simonpasquier
+  * `promtool`: David Leadbeater (<dgl@dgl.cx> / @dgl)
 * `discovery`
-  * `k8s`: @brancz
+  * `k8s`: Frederic Branczyk (<fbranczyk@gmail.com> / @brancz)
 * `documentation`
-  * `prometheus-mixin`: @beorn7
+  * `prometheus-mixin`: Björn Rabenstein (<beorn@grafana.com> / @beorn7)
 * `storage`
-  * `remote`: @csmarchbanks, @cstyan
+  * `remote`: Chris Marchbanks (<csmarchbanks@gmail.com> / @csmarchbanks), Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (<tom.wilkie@gmail.com> / @tomwilkie)
-* `tsdb`: @codesome, @krasi-georgiev
+* `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
+  * `agent`: Robert Fratto (<robert.fratto@grafana.com> / @rfratto)
 * `web`
-  * `ui`: @juliusv
+  * `ui`: Julius Volz (<julius.volz@gmail.com> / @juliusv)
-* `Makefile` and related build configuration: @simonpasquier, @SuperQ
+* `module`: Augustin Husson (<husson.augustin@gmail.com> @nexucis)
+* `Makefile` and related build configuration: Simon Pasquier (<pasquier.simon@gmail.com> / @simonpasquier), Ben Kochie (<superq@gmail.com> / @SuperQ)
 
-For the sake of brevity all subtrees are not explicitly listed. Due to the size
-of this repository, the natural changes in focus of maintainers over time, and
-nuances of where particular features live, this list will always be incomplete
-and out of date. However the listed maintainer(s) should be able to direct a
-PR/question to the right person.
+For the sake of brevity, not all subtrees are explicitly listed. Due to the
+size of this repository, the natural changes in focus of maintainers over time,
+and nuances of where particular features live, this list will always be
+incomplete and out of date. However the listed maintainer(s) should be able to
+direct a PR/question to the right person.
Makefile (145 lines changed)
@@ -12,64 +12,99 @@
 # limitations under the License.
 
 # Needs to be defined before including Makefile.common to auto-generate targets
-DOCKER_ARCHS ?= amd64 armv7 arm64 s390x
+DOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le s390x
 
-REACT_APP_PATH = web/ui/react-app
-REACT_APP_SOURCE_FILES = $(wildcard $(REACT_APP_PATH)/public/* $(REACT_APP_PATH)/src/* $(REACT_APP_PATH)/tsconfig.json)
-REACT_APP_OUTPUT_DIR = web/ui/static/react
-REACT_APP_NODE_MODULES_PATH = $(REACT_APP_PATH)/node_modules
+UI_PATH = web/ui
+UI_NODE_MODULES_PATH = $(UI_PATH)/node_modules
 REACT_APP_NPM_LICENSES_TARBALL = "npm_licenses.tar.bz2"
 
-TSDB_PROJECT_DIR = "./tsdb"
-TSDB_CLI_DIR="$(TSDB_PROJECT_DIR)/cmd/tsdb"
-TSDB_BIN = "$(TSDB_CLI_DIR)/tsdb"
+PROMTOOL = ./promtool
 TSDB_BENCHMARK_NUM_METRICS ?= 1000
-TSDB_BENCHMARK_DATASET ?= "$(TSDB_PROJECT_DIR)/testdata/20kseries.json"
-TSDB_BENCHMARK_OUTPUT_DIR ?= "$(TSDB_CLI_DIR)/benchout"
+TSDB_BENCHMARK_DATASET ?= ./tsdb/testdata/20kseries.json
+TSDB_BENCHMARK_OUTPUT_DIR ?= ./benchout
+
+GOLANGCI_LINT_OPTS ?= --timeout 4m
 
 include Makefile.common
 
 DOCKER_IMAGE_NAME ?= prometheus
 
-$(REACT_APP_NODE_MODULES_PATH): $(REACT_APP_PATH)/package.json $(REACT_APP_PATH)/yarn.lock
-	cd $(REACT_APP_PATH) && yarn --frozen-lockfile
+.PHONY: update-npm-deps
+update-npm-deps:
+	@echo ">> updating npm dependencies"
+	./scripts/npm-deps.sh "minor"
 
-$(REACT_APP_OUTPUT_DIR): $(REACT_APP_NODE_MODULES_PATH) $(REACT_APP_SOURCE_FILES)
-	@echo ">> building React app"
-	@./scripts/build_react_app.sh
+.PHONY: upgrade-npm-deps
+upgrade-npm-deps:
+	@echo ">> upgrading npm dependencies"
+	./scripts/npm-deps.sh "latest"
+
+.PHONY: ui-bump-version
+ui-bump-version:
+	version=$$(sed s/2/0/ < VERSION) && ./scripts/ui_release.sh --bump-version "$${version}"
+	cd web/ui && npm install
+	git add "./web/ui/package-lock.json" "./**/package.json"
+
+.PHONY: ui-install
+ui-install:
+	cd $(UI_PATH) && npm install
+
+.PHONY: ui-build
+ui-build:
+	cd $(UI_PATH) && CI="" npm run build
+
+.PHONY: ui-build-module
+ui-build-module:
+	cd $(UI_PATH) && npm run build:module
+
+.PHONY: ui-test
+ui-test:
+	cd $(UI_PATH) && CI=true npm run test
+
+.PHONY: ui-lint
+ui-lint:
+	cd $(UI_PATH) && npm run lint
 
 .PHONY: assets
-assets: $(REACT_APP_OUTPUT_DIR)
-	@echo ">> writing assets"
-	# Un-setting GOOS and GOARCH here because the generated Go code is always the same,
-	# but the cached object code is incompatible between architectures and OSes (which
-	# breaks cross-building for different combinations on CI in the same container).
-	cd web/ui && GO111MODULE=$(GO111MODULE) GOOS= GOARCH= $(GO) generate -x -v $(GOOPTS)
-	@$(GOFMT) -w ./web/ui
+assets: ui-install ui-build
 
-.PHONY: react-app-lint
-react-app-lint:
-	@echo ">> running React app linting"
-	cd $(REACT_APP_PATH) && yarn lint:ci
+.PHONY: assets-compress
+assets-compress: assets
+	@echo '>> compressing assets'
+	scripts/compress_assets.sh
 
-.PHONY: react-app-lint-fix
-react-app-lint-fix:
-	@echo ">> running React app linting and fixing errors where possible"
-	cd $(REACT_APP_PATH) && yarn lint
+.PHONY: assets-tarball
+assets-tarball: assets
+	@echo '>> packaging assets'
+	scripts/package_assets.sh
 
-.PHONY: react-app-test
-react-app-test: | $(REACT_APP_NODE_MODULES_PATH) react-app-lint
-	@echo ">> running React app tests"
-	cd $(REACT_APP_PATH) && yarn test --no-watch --coverage
+# We only want to generate the parser when there's changes to the grammar.
+.PHONY: parser
+parser:
+	@echo ">> running goyacc to generate the .go file."
+ifeq (, $(shell command -v goyacc > /dev/null))
+	@echo "goyacc not installed so skipping"
+	@echo "To install: go install golang.org/x/tools/cmd/goyacc@v0.6.0"
+else
+	goyacc -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y
+endif
 
 .PHONY: test
-test: common-test react-app-test
+# If we only want to test go code we have to change the test target
+# which is called by all.
+ifeq ($(GO_ONLY),1)
+test: common-test
+else
+test: common-test ui-build-module ui-test ui-lint
+endif
 
 .PHONY: npm_licenses
-npm_licenses: $(REACT_APP_NODE_MODULES_PATH)
+npm_licenses: ui-install
 	@echo ">> bundling npm licenses"
-	rm -f $(REACT_APP_NPM_LICENSES_TARBALL)
-	find $(REACT_APP_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --transform 's/^/npm_licenses\//' --files-from=-
+	rm -f $(REACT_APP_NPM_LICENSES_TARBALL) npm_licenses
+	ln -s . npm_licenses
+	find npm_licenses/$(UI_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --files-from=-
+	rm -f npm_licenses
 
 .PHONY: tarball
 tarball: npm_licenses common-tarball
@@ -77,19 +112,29 @@ tarball: npm_licenses common-tarball
 .PHONY: docker
 docker: npm_licenses common-docker
 
-.PHONY: build
-build: assets common-build
+plugins/plugins.go: plugins.yml plugins/generate.go
+	@echo ">> creating plugins list"
+	$(GO) generate -tags plugins ./plugins
 
-.PHONY: build_tsdb
-build_tsdb:
-	GO111MODULE=$(GO111MODULE) $(GO) build -o $(TSDB_BIN) $(TSDB_CLI_DIR)
+.PHONY: plugins
+plugins: plugins/plugins.go
+
+.PHONY: build
+build: assets npm_licenses assets-compress plugins common-build
 
 .PHONY: bench_tsdb
-bench_tsdb: build_tsdb
+bench_tsdb: $(PROMU)
+	@echo ">> building promtool"
+	@GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) promtool
 	@echo ">> running benchmark, writing result to $(TSDB_BENCHMARK_OUTPUT_DIR)"
-	@$(TSDB_BIN) bench write --metrics=$(TSDB_BENCHMARK_NUM_METRICS) --out=$(TSDB_BENCHMARK_OUTPUT_DIR) $(TSDB_BENCHMARK_DATASET)
-	@$(GO) tool pprof -svg $(TSDB_BIN) $(TSDB_BENCHMARK_OUTPUT_DIR)/cpu.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/cpuprof.svg
-	@$(GO) tool pprof --inuse_space -svg $(TSDB_BIN) $(TSDB_BENCHMARK_OUTPUT_DIR)/mem.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/memprof.inuse.svg
-	@$(GO) tool pprof --alloc_space -svg $(TSDB_BIN) $(TSDB_BENCHMARK_OUTPUT_DIR)/mem.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/memprof.alloc.svg
-	@$(GO) tool pprof -svg $(TSDB_BIN) $(TSDB_BENCHMARK_OUTPUT_DIR)/block.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/blockprof.svg
-	@$(GO) tool pprof -svg $(TSDB_BIN) $(TSDB_BENCHMARK_OUTPUT_DIR)/mutex.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/mutexprof.svg
+	@$(PROMTOOL) tsdb bench write --metrics=$(TSDB_BENCHMARK_NUM_METRICS) --out=$(TSDB_BENCHMARK_OUTPUT_DIR) $(TSDB_BENCHMARK_DATASET)
+	@$(GO) tool pprof -svg $(PROMTOOL) $(TSDB_BENCHMARK_OUTPUT_DIR)/cpu.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/cpuprof.svg
+	@$(GO) tool pprof --inuse_space -svg $(PROMTOOL) $(TSDB_BENCHMARK_OUTPUT_DIR)/mem.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/memprof.inuse.svg
+	@$(GO) tool pprof --alloc_space -svg $(PROMTOOL) $(TSDB_BENCHMARK_OUTPUT_DIR)/mem.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/memprof.alloc.svg
+	@$(GO) tool pprof -svg $(PROMTOOL) $(TSDB_BENCHMARK_OUTPUT_DIR)/block.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/blockprof.svg
+	@$(GO) tool pprof -svg $(PROMTOOL) $(TSDB_BENCHMARK_OUTPUT_DIR)/mutex.prof > $(TSDB_BENCHMARK_OUTPUT_DIR)/mutexprof.svg
+
+.PHONY: cli-documentation
+cli-documentation:
+	$(GO) run ./cmd/prometheus/ --write-documentation > docs/command-line/prometheus.md
+	$(GO) run ./cmd/promtool/ write-documentation > docs/command-line/promtool.md
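The new Makefile knobs above in use, as a sketch; all target and variable names come from this diff, and the argument values are illustrative:

```bash
GO_ONLY=1 make test                               # run only the Go tests, skipping the UI steps
make plugins                                      # regenerate plugins/plugins.go from plugins.yml
make bench_tsdb TSDB_BENCHMARK_NUM_METRICS=10000  # promtool-based write benchmark;
                                                  # pprof SVGs land in ./benchout
```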
Makefile.common (132 lines changed)
@@ -36,29 +36,6 @@ GO_VERSION ?= $(shell $(GO) version)
 GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
 PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
 
-GOVENDOR :=
-GO111MODULE :=
-ifeq (, $(PRE_GO_111))
-	ifneq (,$(wildcard go.mod))
-		# Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI).
-		GO111MODULE := on
-
-		ifneq (,$(wildcard vendor))
-			# Always use the local vendor/ directory to satisfy the dependencies.
-			GOOPTS := $(GOOPTS) -mod=vendor
-		endif
-	endif
-else
-	ifneq (,$(wildcard go.mod))
-		ifneq (,$(wildcard vendor))
-			$(warning This repository requires Go >= 1.11 because of Go modules)
-			$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)')
-		endif
-	else
-		# This repository isn't using Go modules (yet).
-		GOVENDOR := $(FIRST_GOPATH)/bin/govendor
-	endif
-endif
 PROMU := $(FIRST_GOPATH)/bin/promu
 pkgs = ./...
 
@@ -69,17 +46,35 @@ else
 GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
 endif
 
-PROMU_VERSION ?= 0.5.0
+GOTEST := $(GO) test
+GOTEST_DIR :=
+ifneq ($(CIRCLE_JOB),)
+ifneq ($(shell command -v gotestsum > /dev/null),)
+	GOTEST_DIR := test-results
+	GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
+endif
+endif
+
+PROMU_VERSION ?= 0.15.0
 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
 
+SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.18.0
+GOLANGCI_LINT_VERSION ?= v1.54.2
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
 	ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
-		GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+		# If we're in CI and there is an Actions file, that means the linter
+		# is being run in Actions, so we don't need to run it here.
+		ifneq (,$(SKIP_GOLANGCI_LINT))
+			GOLANGCI_LINT :=
+		else ifeq (,$(CIRCLE_JOB))
+			GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+		else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
+			GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+		endif
 	endif
 endif
 
@@ -96,6 +91,8 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
 PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
 TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
 
+SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
+
 ifeq ($(GOHOSTARCH),amd64)
 	ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
 		# Only supported on amd64
@@ -109,7 +106,7 @@ endif
 %: common-% ;
 
 .PHONY: common-all
-common-all: precheck style check_license lint unused build test
+common-all: precheck style check_license lint yamllint unused build test
 
 .PHONY: common-style
 common-style:
@@ -135,44 +132,56 @@ common-check_license:
 .PHONY: common-deps
 common-deps:
 	@echo ">> getting dependencies"
-ifdef GO111MODULE
-	GO111MODULE=$(GO111MODULE) $(GO) mod download
-else
-	$(GO) get $(GOOPTS) -t ./...
-endif
+	$(GO) mod download
+
+.PHONY: update-go-deps
+update-go-deps:
+	@echo ">> updating Go dependencies"
+	@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
+		$(GO) get -d $$m; \
+	done
+	$(GO) mod tidy
 
 .PHONY: common-test-short
-common-test-short:
+common-test-short: $(GOTEST_DIR)
 	@echo ">> running short tests"
-	GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs)
+	$(GOTEST) -short $(GOOPTS) $(pkgs)
 
 .PHONY: common-test
-common-test:
+common-test: $(GOTEST_DIR)
 	@echo ">> running all tests"
-	GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs)
+	$(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
+
+$(GOTEST_DIR):
+	@mkdir -p $@
 
 .PHONY: common-format
 common-format:
 	@echo ">> formatting code"
-	GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs)
+	$(GO) fmt $(pkgs)
 
 .PHONY: common-vet
 common-vet:
 	@echo ">> vetting code"
-	GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs)
+	$(GO) vet $(GOOPTS) $(pkgs)
 
 .PHONY: common-lint
 common-lint: $(GOLANGCI_LINT)
 ifdef GOLANGCI_LINT
 	@echo ">> running golangci-lint"
-ifdef GO111MODULE
 # 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
 # Otherwise staticcheck might fail randomly for some reason not yet explained.
-	GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
-	GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
-else
-	$(GOLANGCI_LINT) run $(pkgs)
-endif
+	$(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
+	$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
 endif
+
+.PHONY: common-yamllint
+common-yamllint:
+	@echo ">> running yamllint on all YAML files in the repository"
+ifeq (, $(shell command -v yamllint > /dev/null))
+	@echo "yamllint not installed so skipping"
+else
+	yamllint .
+endif
 
 # For backward-compatibility.
@@ -180,28 +189,15 @@ endif
 common-staticcheck: lint
 
 .PHONY: common-unused
-common-unused: $(GOVENDOR)
-ifdef GOVENDOR
-	@echo ">> running check for unused packages"
-	@$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages'
-else
-ifdef GO111MODULE
+common-unused:
 	@echo ">> running check for unused/missing packages in go.mod"
-	GO111MODULE=$(GO111MODULE) $(GO) mod tidy
-ifeq (,$(wildcard vendor))
+	$(GO) mod tidy
 	@git diff --exit-code -- go.sum go.mod
-else
-	@echo ">> running check for unused packages in vendor/"
-	GO111MODULE=$(GO111MODULE) $(GO) mod vendor
-	@git diff --exit-code -- go.sum go.mod vendor/
-endif
-endif
-endif
 
 .PHONY: common-build
 common-build: promu
 	@echo ">> building binaries"
-	GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
+	$(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
 
 .PHONY: common-tarball
 common-tarball: promu
@@ -211,7 +207,7 @@ common-tarball: promu
 .PHONY: common-docker $(BUILD_DOCKER_ARCHS)
 common-docker: $(BUILD_DOCKER_ARCHS)
 $(BUILD_DOCKER_ARCHS): common-docker-%:
-	docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
+	docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
 		-f $(DOCKERFILE_PATH) \
 		--build-arg ARCH="$*" \
 		--build-arg OS="linux" \
@@ -220,17 +216,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%:
 .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
 common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
 $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
-	docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
+	docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
 
+DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
 .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
 common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
 $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
-	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
+	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
+	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
 
 .PHONY: common-docker-manifest
 common-docker-manifest:
-	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
-	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"
+	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
+	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
 
 .PHONY: promu
 promu: $(PROMU)
@@ -255,12 +253,6 @@ $(GOLANGCI_LINT):
 		| sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
 endif
 
-ifdef GOVENDOR
-.PHONY: $(GOVENDOR)
-$(GOVENDOR):
-	GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor
-endif
-
 .PHONY: precheck
 precheck::
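One detail worth making concrete from the Makefile.common changes: Docker tags may not contain `+`, so semver build metadata in the image tag is rewritten by `SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))`. A sketch, with an illustrative tag value:

```bash
# '+stringlabels' is build metadata in the semver sense; Docker rejects '+',
# so the common targets substitute '-' before tagging.
make common-docker-amd64 DOCKER_IMAGE_TAG=v2.37.0+stringlabels
# resulting image tag: .../prometheus-linux-amd64:v2.37.0-stringlabels
```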
NOTICE (19 lines changed)
@@ -86,8 +86,23 @@ https://github.com/samuel/go-zookeeper
 Copyright (c) 2013, Samuel Stauffer <samuel@descolada.com>
 See https://github.com/samuel/go-zookeeper/blob/master/LICENSE for license details.
 
+Time series compression algorithm from Facebook's Gorilla paper
+https://github.com/dgryski/go-tsz
+Copyright (c) 2015,2016 Damian Gryski <damian@gryski.com>
+See https://github.com/dgryski/go-tsz/blob/master/LICENSE for license details.
+
+The Go programming language
+https://go.dev/
+Copyright (c) 2009 The Go Authors
+See https://go.dev/LICENSE for license details.
+
+The Codicon icon font from Microsoft
+https://github.com/microsoft/vscode-codicons
+Copyright (c) Microsoft Corporation and other contributors
+See https://github.com/microsoft/vscode-codicons/blob/main/LICENSE for license details.
+
 We also use code from a large number of npm packages. For details, see:
-- https://github.com/prometheus/prometheus/blob/master/web/ui/react-app/package.json
-- https://github.com/prometheus/prometheus/blob/master/web/ui/react-app/package-lock.json
+- https://github.com/prometheus/prometheus/blob/main/web/ui/react-app/package.json
+- https://github.com/prometheus/prometheus/blob/main/web/ui/react-app/package-lock.json
 - The individual package licenses as copied from the node_modules directory can be found in
   the npm_licenses.tar.bz2 archive in release tarballs and Docker images.
README.md (177 lines changed)
@@ -1,34 +1,40 @@
-# Prometheus
+<h1 align="center" style="border-bottom: none">
+    <a href="//prometheus.io" target="_blank"><img alt="Prometheus" src="/documentation/images/prometheus-logo.svg"></a><br>Prometheus
+</h1>
 
-[![CircleCI](https://circleci.com/gh/prometheus/prometheus/tree/master.svg?style=shield)][circleci]
+<p align="center">Visit <a href="//prometheus.io" target="_blank">prometheus.io</a> for the full documentation,
+examples and guides.</p>
+
+<div align="center">
+
+[![CI](https://github.com/prometheus/prometheus/actions/workflows/ci.yml/badge.svg)](https://github.com/prometheus/prometheus/actions/workflows/ci.yml)
 [![Docker Repository on Quay](https://quay.io/repository/prometheus/prometheus/status)][quay]
 [![Docker Pulls](https://img.shields.io/docker/pulls/prom/prometheus.svg?maxAge=604800)][hub]
 [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/prometheus)](https://goreportcard.com/report/github.com/prometheus/prometheus)
 [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/486/badge)](https://bestpractices.coreinfrastructure.org/projects/486)
-[![fuzzit](https://app.fuzzit.dev/badge?org_id=prometheus&branch=master)](https://fuzzit.dev)
+[![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/prometheus/prometheus)
+[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/prometheus.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:prometheus)
 
-Visit [prometheus.io](https://prometheus.io) for the full documentation,
-examples and guides.
+</div>
 
 Prometheus, a [Cloud Native Computing Foundation](https://cncf.io/) project, is a systems and service monitoring system. It collects metrics
 from configured targets at given intervals, evaluates rule expressions,
-displays the results, and can trigger alerts if some condition is observed
-to be true.
+displays the results, and can trigger alerts when specified conditions are observed.
 
-Prometheus's main distinguishing features as compared to other monitoring systems are:
+The features that distinguish Prometheus from other metrics and monitoring systems are:
 
-- a **multi-dimensional** data model (timeseries defined by metric name and set of key/value dimensions)
-- a **flexible query language** to leverage this dimensionality
-- no dependency on distributed storage; **single server nodes are autonomous**
-- timeseries collection happens via a **pull model** over HTTP
-- **pushing timeseries** is supported via an intermediary gateway
-- targets are discovered via **service discovery** or **static configuration**
-- multiple modes of **graphing and dashboarding support**
-- support for hierarchical and horizontal **federation**
+* A **multi-dimensional** data model (time series defined by metric name and set of key/value dimensions)
+* PromQL, a **powerful and flexible query language** to leverage this dimensionality
+* No dependency on distributed storage; **single server nodes are autonomous**
+* An HTTP **pull model** for time series collection
+* **Pushing time series** is supported via an intermediary gateway for batch jobs
+* Targets are discovered via **service discovery** or **static configuration**
+* Multiple modes of **graphing and dashboarding support**
+* Support for hierarchical and horizontal **federation**
 
 ## Architecture overview
 
-![](https://cdn.jsdelivr.net/gh/prometheus/prometheus@c34257d069c630685da35bcef084632ffd5d6209/documentation/images/architecture.svg)
+![Architecture overview](documentation/images/architecture.svg)
 
 ## Install
 
@@ -43,77 +49,142 @@ is the recommended way of installing Prometheus.
 See the [Installing](https://prometheus.io/docs/introduction/install/)
 chapter in the documentation for all the details.
 
-Debian packages [are available](https://packages.debian.org/sid/net/prometheus).
-
 ### Docker images
 
 Docker images are available on [Quay.io](https://quay.io/repository/prometheus/prometheus) or [Docker Hub](https://hub.docker.com/r/prom/prometheus/).
 
 You can launch a Prometheus container for trying it out with
 
-    $ docker run --name prometheus -d -p 127.0.0.1:9090:9090 prom/prometheus
+```bash
+docker run --name prometheus -d -p 127.0.0.1:9090:9090 prom/prometheus
+```
 
-Prometheus will now be reachable at http://localhost:9090/.
+Prometheus will now be reachable at <http://localhost:9090/>.
 
 ### Building from source
 
-To build Prometheus from the source code yourself you need to have a working
-Go environment with [version 1.13 or greater installed](https://golang.org/doc/install).
-You will also need to have [Node.js](https://nodejs.org/) and [Yarn](https://yarnpkg.com/)
-installed in order to build the frontend assets.
+To build Prometheus from source code, you need:
+
+* Go [version 1.17 or greater](https://golang.org/doc/install).
+* NodeJS [version 16 or greater](https://nodejs.org/).
+* npm [version 7 or greater](https://www.npmjs.com/).
+
+Start by cloning the repository:
+
+```bash
+git clone https://github.com/prometheus/prometheus.git
+cd prometheus
+```
 
-You can directly use the `go` tool to download and install the `prometheus`
+You can use the `go` tool to build and install the `prometheus`
 and `promtool` binaries into your `GOPATH`:
 
-    $ go get github.com/prometheus/prometheus/cmd/...
-    $ prometheus --config.file=your_config.yml
+```bash
+GO111MODULE=on go install github.com/prometheus/prometheus/cmd/...
+prometheus --config.file=your_config.yml
+```
 
-*However*, when using `go get` to build Prometheus, Prometheus will expect to be able to
+*However*, when using `go install` to build Prometheus, Prometheus will expect to be able to
 read its web assets from local filesystem directories under `web/ui/static` and
 `web/ui/templates`. In order for these assets to be found, you will have to run Prometheus
 from the root of the cloned repository. Note also that these directories do not include the
-new experimental React UI unless it has been built explicitly using `make assets` or `make build`.
+React UI unless it has been built explicitly using `make assets` or `make build`.
 
-An example of the above configuration file can be found [here.](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus.yml)
+An example of the above configuration file can be found [here.](https://github.com/prometheus/prometheus/blob/main/documentation/examples/prometheus.yml)
 
-You can also clone the repository yourself and build using `make build`, which will compile in
-the web assets so that Prometheus can be run from anywhere:
+You can also build using `make build`, which will compile in the web assets so that
+Prometheus can be run from anywhere:
 
-    $ mkdir -p $GOPATH/src/github.com/prometheus
-    $ cd $GOPATH/src/github.com/prometheus
-    $ git clone https://github.com/prometheus/prometheus.git
-    $ cd prometheus
-    $ make build
-    $ ./prometheus --config.file=your_config.yml
+```bash
+make build
+./prometheus --config.file=your_config.yml
+```
 
 The Makefile provides several targets:
 
 * *build*: build the `prometheus` and `promtool` binaries (includes building and compiling in web assets)
 * *test*: run the tests
 * *test-short*: run the short tests
 * *format*: format the source code
 * *vet*: check the source code for common errors
-* *docker*: build a docker container for the current `HEAD`
+* *assets*: build the React UI
+
+### Service discovery plugins
+
+Prometheus is bundled with many service discovery plugins.
+When building Prometheus from source, you can edit the [plugins.yml](./plugins.yml)
+file to disable some service discoveries. The file is a YAML-formatted list of Go
+import paths that will be built into the Prometheus binary.
+
+After you have changed the file, you
+need to run `make build` again.
+
+If you are using another method to compile Prometheus, `make plugins` will
+generate the plugins file accordingly.
+
+If you add out-of-tree plugins, which we do not endorse at the moment,
+additional steps might be needed to adjust the `go.mod` and `go.sum` files. As
+always, be extra careful when loading third party code.
+
+### Building the Docker image
+
+The `make docker` target is designed for use in our CI system.
+You can build a docker image locally with the following commands:
+
+```bash
+make promu
+promu crossbuild -p linux/amd64
+make npm_licenses
+make common-docker-amd64
+```
+
+## Using Prometheus as a Go Library
+
+### Remote Write
+
+We are publishing our Remote Write protobuf independently at
+[buf.build](https://buf.build/prometheus/prometheus/assets).
+
+You can use that as a library:
+
+```shell
+go get go.buf.build/protocolbuffers/go/prometheus/prometheus
+```
+
+This is experimental.
+
+### Prometheus code base
+
+In order to comply with [go mod](https://go.dev/ref/mod#versions) rules,
+Prometheus release numbers do not exactly match Go module releases. For the
+Prometheus v2.y.z releases, we are publishing equivalent v0.y.z tags.
+
+Therefore, a user who wants to use Prometheus v2.35.0 as a library could do:
+
+```shell
+go get github.com/prometheus/prometheus@v0.35.0
+```
+
+This solution makes it clear that we might break our internal Go APIs between
+minor user-facing releases, as [breaking changes are allowed in major version
+zero](https://semver.org/#spec-item-4).
+
 ## React UI Development
 
-For more information on building, running, and developing on the new React-based UI, see the React app's [README.md](https://github.com/prometheus/prometheus/blob/master/web/ui/react-app/README.md).
+For more information on building, running, and developing on the React-based UI, see the React app's [README.md](web/ui/README.md).
 
 ## More information
 
-* The source code is periodically indexed: [Prometheus Core](https://godoc.org/github.com/prometheus/prometheus).
-* You will find a CircleCI configuration in `.circleci/config.yml`.
+* Godoc documentation is available via [pkg.go.dev](https://pkg.go.dev/github.com/prometheus/prometheus). Due to peculiarities of Go Modules, v2.x.y will be displayed as v0.x.y.
 * See the [Community page](https://prometheus.io/community) for how to reach the Prometheus developers and users on various communication channels.
 
 ## Contributing
 
-Refer to [CONTRIBUTING.md](https://github.com/prometheus/prometheus/blob/master/CONTRIBUTING.md)
+Refer to [CONTRIBUTING.md](https://github.com/prometheus/prometheus/blob/main/CONTRIBUTING.md)
 
 ## License
 
-Apache License 2.0, see [LICENSE](https://github.com/prometheus/prometheus/blob/master/LICENSE).
+Apache License 2.0, see [LICENSE](https://github.com/prometheus/prometheus/blob/main/LICENSE).
 
 
 [hub]: https://hub.docker.com/r/prom/prometheus/
-[circleci]: https://circleci.com/gh/prometheus/prometheus
 [quay]: https://quay.io/repository/prometheus/prometheus
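The service-discovery plugins section added to the README above leaves the shape of `plugins.yml` implicit: it is a YAML list of Go import paths, one per discovery mechanism. As a sketch, disabling one mechanism is a one-line edit; the entry name below is an assumption about the file's contents:

```bash
# Hypothetical: drop the Kubernetes service-discovery import path from
# plugins.yml (entry name assumed), then rebuild so the generated
# plugins/plugins.go matches the trimmed list.
sed -i '/discovery\/kubernetes/d' plugins.yml
make build   # or `make plugins` when building by other means
```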
159
RELEASE.md
159
RELEASE.md
|
@ -21,7 +21,38 @@ Release cadence of first pre-releases being cut is 6 weeks.

| v2.14 | 2019-11-06 | Chris Marchbanks (GitHub: @csmarchbanks) |
| v2.15 | 2019-12-18 | Bartek Plotka (GitHub: @bwplotka) |
| v2.16 | 2020-01-29 | Callum Styan (GitHub: @cstyan) |
| v2.17 | 2020-03-11 | Julien Pivotto (GitHub: @roidelapluie) |
| v2.18 | 2020-04-22 | Bartek Plotka (GitHub: @bwplotka) |
| v2.19 | 2020-06-03 | Ganesh Vernekar (GitHub: @codesome) |
| v2.20 | 2020-07-15 | Björn Rabenstein (GitHub: @beorn7) |
| v2.21 | 2020-08-26 | Julien Pivotto (GitHub: @roidelapluie) |
| v2.22 | 2020-10-07 | Frederic Branczyk (GitHub: @brancz) |
| v2.23 | 2020-11-18 | Ganesh Vernekar (GitHub: @codesome) |
| v2.24 | 2020-12-30 | Björn Rabenstein (GitHub: @beorn7) |
| v2.25 | 2021-02-10 | Julien Pivotto (GitHub: @roidelapluie) |
| v2.26 | 2021-03-24 | Bartek Plotka (GitHub: @bwplotka) |
| v2.27 | 2021-05-05 | Chris Marchbanks (GitHub: @csmarchbanks) |
| v2.28 | 2021-06-16 | Julius Volz (GitHub: @juliusv) |
| v2.29 | 2021-07-28 | Frederic Branczyk (GitHub: @brancz) |
| v2.30 | 2021-09-08 | Ganesh Vernekar (GitHub: @codesome) |
| v2.31 | 2021-10-20 | Julien Pivotto (GitHub: @roidelapluie) |
| v2.32 | 2021-12-01 | Julius Volz (GitHub: @juliusv) |
| v2.33 | 2022-01-12 | Björn Rabenstein (GitHub: @beorn7) |
| v2.34 | 2022-02-23 | Chris Marchbanks (GitHub: @csmarchbanks) |
| v2.35 | 2022-04-06 | Augustin Husson (GitHub: @nexucis) |
| v2.36 | 2022-05-18 | Matthias Loibl (GitHub: @metalmatze) |
| v2.37 LTS | 2022-06-29 | Julien Pivotto (GitHub: @roidelapluie) |
| v2.38 | 2022-08-10 | Julius Volz (GitHub: @juliusv) |
| v2.39 | 2022-09-21 | Ganesh Vernekar (GitHub: @codesome) |
| v2.40 | 2022-11-02 | Ganesh Vernekar (GitHub: @codesome) |
| v2.41 | 2022-12-14 | Julien Pivotto (GitHub: @roidelapluie) |
| v2.42 | 2023-01-25 | Kemal Akkoyun (GitHub: @kakkoyun) |
| v2.43 | 2023-03-08 | Julien Pivotto (GitHub: @roidelapluie) |
| v2.44 | 2023-04-19 | Bryan Boreham (GitHub: @bboreham) |
| v2.45 LTS | 2023-05-31 | Jesus Vazquez (GitHub: @jesusvazquez) |
| v2.46 | 2023-07-12 | Julien Pivotto (GitHub: @roidelapluie) |
| v2.47 | 2023-08-23 | Bryan Boreham (GitHub: @bboreham) |
| v2.48 | 2023-10-04 | **searching for volunteer** |

If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
@ -29,10 +60,10 @@ If you are interested in volunteering please create a pull request against the [

The release shepherd is responsible for the entire release series of a minor release, meaning all pre- and patch releases of a minor release. The process formally starts with the initial pre-release, but some preparations should be done a few days in advance.

* We aim to keep the main branch in a working state at all times. In principle, it should be possible to cut a release from main at any time. In practice, things might not work out as nicely. A few days before the pre-release is scheduled, the shepherd should check the state of main. Following their best judgement, the shepherd should try to expedite bug fixes that are still in progress but should make it into the release. On the other hand, the shepherd may hold back merging last-minute invasive and risky changes that are better suited for the next minor release.
* On the date listed in the table above, the release shepherd cuts the first pre-release (using the suffix `-rc.0`) and creates a new branch called `release-<major>.<minor>` starting at the commit tagged for the pre-release. In general, a pre-release is considered a release candidate (that's what `rc` stands for) and should therefore not contain any known bugs that are planned to be fixed in the final release.
* With the pre-release, the release shepherd is responsible for running and monitoring a benchmark run of the pre-release for 3 days, after which, if successful, the pre-release is promoted to a stable release.
* If regressions or critical bugs are detected, they need to get fixed before cutting a new pre-release (called `-rc.1`, `-rc.2`, etc.).

See the next section for details on cutting an individual release.
@ -46,25 +77,22 @@ We use [Semantic Versioning](https://semver.org/).

We maintain a separate branch for each minor release, named `release-<major>.<minor>`, e.g. `release-1.1`, `release-2.0`.

Note that branch protection kicks in automatically for any branches whose name starts with `release-`. Never use names starting with `release-` for branches that are not release branches.

The usual flow is to merge new features and changes into the main branch and to merge bug fixes into the latest release branch. Bug fixes are then merged into main from the latest release branch. The main branch should always contain all commits from the latest release branch. As long as main hasn't deviated from the release branch, new commits can also go to main, followed by merging main back into the release branch.

If a bug fix got accidentally merged into main after non-bug-fix changes in main, the bug-fix commits have to be cherry-picked into the release branch, which then have to be merged back into main. Try to avoid that situation.

Maintaining the release branches for older minor releases happens on a best effort basis.

### 0. Updating dependencies and promoting/demoting experimental features

A few days before a major or minor release, consider updating the dependencies.

Note that we use [Dependabot](.github/dependabot.yml) to continuously update most things automatically. Therefore, most dependencies should be up to date. Check the [dependencies GitHub label](https://github.com/prometheus/prometheus/labels/dependencies) to see if there are any pending updates.
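For a quick local overview of pending updates, one minimal sketch using standard Go tooling (not a project Makefile target) is:

```bash
# List module dependencies that have newer versions available;
# upgrades are shown in square brackets.
go list -u -m all | grep '\['
```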
This bot currently does not manage `+incompatible` and `v0.0.0` in the version specifier for Go modules.

Note that after a dependency update, you should look out for any weirdness that
might have happened. Such weirdnesses include but are not limited to: flaky
@ -75,54 +103,107 @@ you can skip the dependency update or only update select dependencies. In such a
case, you have to create an issue or pull request in the GitHub project for
later follow-up.

This is also a good time to consider any experimental features and feature
flags for promotion to stable or for deprecation or ultimately removal. Do any
of these in pull requests, one per feature.

#### Manually updating Go dependencies

This is usually only needed for `+incompatible` and `v0.0.0` non-semver updates.

```bash
make update-go-deps
git add go.mod go.sum
git commit -m "Update dependencies"
```

#### Manually updating React dependencies

The React application recently moved to a monorepo system with multiple internal npm packages. Dependency upgrades are quite sensitive for the time being.

In case you want to update the UI dependencies, you can run the following command:

```bash
make update-npm-deps
```

Once this step completes, please verify that no additional `node_modules` directory was created in any of the module subdirectories (which could indicate conflicting dependency versions across modules). Then run `make ui-build` to verify that the build is still working.

Note: Once in a while, the npm dependencies should also be updated to their latest release versions (major or minor) with `make upgrade-npm-deps`, though this may be done at convenient times (e.g. by the UI maintainers) that are out-of-sync with Prometheus releases.
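A minimal sketch of that verification, assuming the usual `web/ui` layout (the `find` invocation is illustrative, not a Makefile target):

```bash
make update-npm-deps
# A stray node_modules directory inside one of the module subdirectories
# would hint at conflicting dependency versions; this should print nothing.
find web/ui -mindepth 2 -name node_modules -type d -not -path '*/node_modules/*'
make ui-build
```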
### 1. Prepare your release

At the start of a new major or minor release cycle create the corresponding release branch based on the main branch. For example if we're releasing `2.17.0` and the previous stable release is `2.16.0` we need to create a `release-2.17` branch. Note that all releases are handled in protected release branches, see the above `Branch management and versioning` section. Release candidates and patch releases for any given major or minor release happen in the same `release-<major>.<minor>` branch. Do not create `release-<version>` for patch or release candidate releases.

Changes for a patch release or release candidate should be merged into the previously mentioned release branch via pull request.
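As a concrete sketch for the hypothetical `2.17` example above, assuming a clean and up-to-date checkout of `main`:

```bash
# Cut and publish the protected release branch for the new minor series.
git checkout main
git pull origin main
git checkout -b release-2.17
git push origin release-2.17
```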
Bump the version in the `VERSION` file and update `CHANGELOG.md`. Do this in a proper PR pointing to the release branch as this gives others the opportunity to chime in on the release in general and on the addition to the changelog in particular. For a release candidate, append something like `-rc.0` to the version (with the corresponding changes to the tag name, the release name etc.).

Note that `CHANGELOG.md` should only document changes relevant to users of Prometheus, including external API changes, performance improvements, and new features. Do not document changes of internal interfaces, code refactorings and clean-ups, changes to the build process, etc. People interested in these are asked to refer to the git history.

For release candidates still update `CHANGELOG.md`, but when you cut the final release later, merge all the changes from the pre-releases into the one final update.

Entries in the `CHANGELOG.md` are meant to be in this order:

* `[SECURITY]` - A bugfix that specifically fixes a security issue.
* `[CHANGE]`
* `[FEATURE]`
* `[ENHANCEMENT]`
* `[BUGFIX]`
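A hypothetical excerpt showing that ordering in practice (the entry texts are placeholders, not real changelog entries):

```
## 2.17.0-rc.0 / 2020-03-02

* [CHANGE] Example of a breaking or behavioral change.
* [FEATURE] Example of a new feature.
* [ENHANCEMENT] Example of an improvement to an existing feature.
* [BUGFIX] Example of a bug fix.
```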
Then bump the UI module version:

```bash
make ui-bump-version
```
### 2. Draft the new release

Tag the new release via the following commands:

```bash
tag="v$(< VERSION)"
git tag -s "${tag}" -m "${tag}"
git push origin "${tag}"
```
Go modules versioning requires strict use of semver. Because we do not commit to avoid code-level breaking changes for the libraries between minor releases of the Prometheus server, we use major version zero releases for the libraries.

Tag the new library release via the following commands:

```bash
tag="v$(sed s/2/0/ < VERSION)"
git tag -s "${tag}" -m "${tag}"
git push origin "${tag}"
```
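For example, with `2.32.0` in the `VERSION` file, `sed s/2/0/` rewrites only the first `2`, producing `0.32.0`, so the library tag pushed here is `v0.32.0`, matching the v0.x.y form that pkg.go.dev displays for v2.x.y releases.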
Optionally, you can use this handy `.gitconfig` alias.

```ini
[alias]
  tag-release = "!f() { tag=v${1:-$(cat VERSION)} ; git tag -s ${tag} -m ${tag} && git push origin ${tag}; }; f"
```

Then release with `git tag-release`.
Signing a tag with a GPG key is appreciated, but in case you can't add a GPG key to your GitHub account using the following [procedure](https://help.github.com/articles/generating-a-gpg-key/), you can replace the `-s` flag by the `-a` flag of the `git tag` command to only annotate the tag without signing.
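If you did sign, a quick sanity check before relying on the tag (a minimal sketch, assuming your GPG setup is already working):

```bash
# Verify the signature on the tag you just created.
git tag -v "v$(< VERSION)"
```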
Once a tag is created, the release process through CircleCI will be triggered for this tag and CircleCI will draft the GitHub release using the `prombot` account.

Finally, wait for the build step for the tag to finish. The point here is to wait for tarballs to be uploaded to the GitHub release and the container images to be pushed to Docker Hub and Quay.io. Once that has happened, click _Publish release_, which will make the release publicly visible and create a GitHub notification.

**Note:** for a release candidate version ensure the _This is a pre-release_ box is checked when drafting the release in the GitHub UI. The CI job should take care of this but it's a good idea to double check before clicking _Publish release_.

### 3. Wrapping up

For release candidate versions (`v2.16.0-rc.0`), run the benchmark for 3 days using the `/prombench vX.Y.Z` command, `vX.Y.Z` being the latest stable patch release's tag of the previous minor release series, such as `v2.15.2`.
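For example, for a `v2.16.0-rc.0` candidate, commenting `/prombench v2.15.2` on its pull request starts the comparison run against the latest patch release of the `2.15` series.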
If the release has happened in the latest release branch, merge the changes into main.

To update the docs, a PR needs to be created to `prometheus/docs`. See [this PR](https://github.com/prometheus/docs/pull/952/files) for inspiration (note: only actually merge this for final releases, not for pre-releases like a release candidate).

Once the binaries have been uploaded, announce the release on `prometheus-announce@googlegroups.com`. (Please do not use `prometheus-users@googlegroups.com` for announcements anymore.) Check out previous announcement mails for inspiration.

Finally, in case there is no release shepherd listed for the next release yet, find a volunteer.
SECURITY.md (new file)
@ -0,0 +1,6 @@
# Reporting a security issue

The Prometheus security policy, including how to report vulnerabilities, can be
found here:

<https://prometheus.io/docs/operating/security/>
File diff suppressed because it is too large.
cmd/prometheus/main_test.go

@ -14,25 +14,38 @@
package main

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"math"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"
	"syscall"
	"testing"
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/notifier"
	"github.com/prometheus/prometheus/rules"
)

const startupTime = 10 * time.Second

var (
	promPath    = os.Args[0]
	promConfig  = filepath.Join("..", "..", "documentation", "examples", "prometheus.yml")
	agentConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus-agent.yml")
)

func TestMain(m *testing.M) {
	for i, arg := range os.Args {

@ -47,7 +60,6 @@ func TestMain(m *testing.M) {
	os.Setenv("no_proxy", "localhost,127.0.0.1,0.0.0.0,:")

	exitCode := m.Run()
	os.Exit(exitCode)
}

@ -93,9 +105,9 @@ func TestComputeExternalURL(t *testing.T) {
	for _, test := range tests {
		_, err := computeExternalURL(test.input, "0.0.0.0:9090")
		if test.valid {
			require.NoError(t, err)
		} else {
			require.Error(t, err, "input=%q", test.input)
		}
	}
}

@ -107,15 +119,16 @@ func TestFailedStartupExitCode(t *testing.T) {
	}

	fakeInputFile := "fake-input-file"
	expectedExitStatus := 2

	prom := exec.Command(promPath, "-test.main", "--web.listen-address=0.0.0.0:0", "--config.file="+fakeInputFile)
	err := prom.Run()
	require.Error(t, err)

	var exitError *exec.ExitError
	if errors.As(err, &exitError) {
		status := exitError.Sys().(syscall.WaitStatus)
		require.Equal(t, expectedExitStatus, status.ExitStatus())
	} else {
		t.Errorf("unable to retrieve the exit status for prometheus: %v", err)
	}

@ -135,8 +148,8 @@ func TestSendAlerts(t *testing.T) {
		{
			in: []*rules.Alert{
				{
					Labels:      labels.FromStrings("l1", "v1"),
					Annotations: labels.FromStrings("a2", "v2"),
					ActiveAt:    time.Unix(1, 0),
					FiredAt:     time.Unix(2, 0),
					ValidUntil:  time.Unix(3, 0),

@ -144,8 +157,8 @@ func TestSendAlerts(t *testing.T) {
			},
			exp: []*notifier.Alert{
				{
					Labels:       labels.FromStrings("l1", "v1"),
					Annotations:  labels.FromStrings("a2", "v2"),
					StartsAt:     time.Unix(2, 0),
					EndsAt:       time.Unix(3, 0),
					GeneratorURL: "http://localhost:9090/graph?g0.expr=up&g0.tab=1",

@ -155,8 +168,8 @@ func TestSendAlerts(t *testing.T) {
		{
			in: []*rules.Alert{
				{
					Labels:      labels.FromStrings("l1", "v1"),
					Annotations: labels.FromStrings("a2", "v2"),
					ActiveAt:    time.Unix(1, 0),
					FiredAt:     time.Unix(2, 0),
					ResolvedAt:  time.Unix(4, 0),

@ -164,8 +177,8 @@ func TestSendAlerts(t *testing.T) {
			},
			exp: []*notifier.Alert{
				{
					Labels:       labels.FromStrings("l1", "v1"),
					Annotations:  labels.FromStrings("a2", "v2"),
					StartsAt:     time.Unix(2, 0),
					EndsAt:       time.Unix(4, 0),
					GeneratorURL: "http://localhost:9090/graph?g0.expr=up&g0.tab=1",

@ -184,9 +197,9 @@ func TestSendAlerts(t *testing.T) {
				if len(tc.in) == 0 {
					t.Fatalf("sender called with 0 alert")
				}
				require.Equal(t, tc.exp, alerts)
			})
			rules.SendAlerts(senderFunc, "http://localhost:9090")(context.TODO(), "up", tc.in...)
		})
	}
}

@ -197,18 +210,18 @@ func TestWALSegmentSizeBounds(t *testing.T) {
	}

	for size, expectedExitStatus := range map[string]int{"9MB": 1, "257MB": 1, "10": 2, "1GB": 1, "12MB": 0} {
		prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data"))

		// Log stderr in case of failure.
		stderr, err := prom.StderrPipe()
		require.NoError(t, err)
		go func() {
			slurp, _ := io.ReadAll(stderr)
			t.Log(string(slurp))
		}()

		err = prom.Start()
		require.NoError(t, err)

		if expectedExitStatus == 0 {
			done := make(chan error, 1)

@ -216,19 +229,286 @@ func TestWALSegmentSizeBounds(t *testing.T) {
			select {
			case err := <-done:
				t.Errorf("prometheus should be still running: %v", err)
			case <-time.After(startupTime):
				prom.Process.Kill()
				<-done
			}
			continue
		}

		err = prom.Wait()
		require.Error(t, err)
		var exitError *exec.ExitError
		if errors.As(err, &exitError) {
			status := exitError.Sys().(syscall.WaitStatus)
			require.Equal(t, expectedExitStatus, status.ExitStatus())
		} else {
			t.Errorf("unable to retrieve the exit status for prometheus: %v", err)
		}
	}
}

func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
	t.Parallel()

	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	for size, expectedExitStatus := range map[string]int{"512KB": 1, "1MB": 0} {
		prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data"))

		// Log stderr in case of failure.
		stderr, err := prom.StderrPipe()
		require.NoError(t, err)
		go func() {
			slurp, _ := io.ReadAll(stderr)
			t.Log(string(slurp))
		}()

		err = prom.Start()
		require.NoError(t, err)

		if expectedExitStatus == 0 {
			done := make(chan error, 1)
			go func() { done <- prom.Wait() }()
			select {
			case err := <-done:
				t.Errorf("prometheus should be still running: %v", err)
			case <-time.After(startupTime):
				prom.Process.Kill()
				<-done
			}
			continue
		}

		err = prom.Wait()
		require.Error(t, err)
		var exitError *exec.ExitError
		if errors.As(err, &exitError) {
			status := exitError.Sys().(syscall.WaitStatus)
			require.Equal(t, expectedExitStatus, status.ExitStatus())
		} else {
			t.Errorf("unable to retrieve the exit status for prometheus: %v", err)
		}
	}
}

func TestTimeMetrics(t *testing.T) {
	tmpDir := t.TempDir()

	reg := prometheus.NewRegistry()
	db, err := openDBWithMetrics(tmpDir, log.NewNopLogger(), reg, nil, nil)
	require.NoError(t, err)
	defer func() {
		require.NoError(t, db.Close())
	}()

	// Check initial values.
	require.Equal(t, map[string]float64{
		"prometheus_tsdb_lowest_timestamp_seconds": float64(math.MaxInt64) / 1000,
		"prometheus_tsdb_head_min_time_seconds":    float64(math.MaxInt64) / 1000,
		"prometheus_tsdb_head_max_time_seconds":    float64(math.MinInt64) / 1000,
	}, getCurrentGaugeValuesFor(t, reg,
		"prometheus_tsdb_lowest_timestamp_seconds",
		"prometheus_tsdb_head_min_time_seconds",
		"prometheus_tsdb_head_max_time_seconds",
	))

	app := db.Appender(context.Background())
	_, err = app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 1000, 1)
	require.NoError(t, err)
	_, err = app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 2000, 1)
	require.NoError(t, err)
	_, err = app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 3000, 1)
	require.NoError(t, err)
	require.NoError(t, app.Commit())

	require.Equal(t, map[string]float64{
		"prometheus_tsdb_lowest_timestamp_seconds": 1.0,
		"prometheus_tsdb_head_min_time_seconds":    1.0,
		"prometheus_tsdb_head_max_time_seconds":    3.0,
	}, getCurrentGaugeValuesFor(t, reg,
		"prometheus_tsdb_lowest_timestamp_seconds",
		"prometheus_tsdb_head_min_time_seconds",
		"prometheus_tsdb_head_max_time_seconds",
	))
}

func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames ...string) map[string]float64 {
	f, err := reg.Gather()
	require.NoError(t, err)

	res := make(map[string]float64, len(metricNames))
	for _, g := range f {
		for _, m := range metricNames {
			if g.GetName() != m {
				continue
			}

			require.Equal(t, 1, len(g.GetMetric()))
			if _, ok := res[m]; ok {
				t.Error("expected only one metric family for", m)
				t.FailNow()
			}
			res[m] = *g.GetMetric()[0].GetGauge().Value
		}
	}
	return res
}

func TestAgentSuccessfulStartup(t *testing.T) {
	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig)
	require.NoError(t, prom.Start())

	actualExitStatus := 0
	done := make(chan error, 1)

	go func() { done <- prom.Wait() }()
	select {
	case err := <-done:
		t.Logf("prometheus agent should be still running: %v", err)
		actualExitStatus = prom.ProcessState.ExitCode()
	case <-time.After(startupTime):
		prom.Process.Kill()
	}
	require.Equal(t, 0, actualExitStatus)
}

func TestAgentFailedStartupWithServerFlag(t *testing.T) {
	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)

	output := bytes.Buffer{}
	prom.Stderr = &output
	require.NoError(t, prom.Start())

	actualExitStatus := 0
	done := make(chan error, 1)

	go func() { done <- prom.Wait() }()
	select {
	case err := <-done:
		t.Logf("prometheus agent should not be running: %v", err)
		actualExitStatus = prom.ProcessState.ExitCode()
	case <-time.After(startupTime):
		prom.Process.Kill()
	}

	require.Equal(t, 3, actualExitStatus)

	// Assert on last line.
	lines := strings.Split(output.String(), "\n")
	last := lines[len(lines)-1]
	require.Equal(t, "The following flag(s) can not be used in agent mode: [\"--storage.tsdb.path\"]", last)
}

func TestAgentFailedStartupWithInvalidConfig(t *testing.T) {
	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
	require.NoError(t, prom.Start())

	actualExitStatus := 0
	done := make(chan error, 1)

	go func() { done <- prom.Wait() }()
	select {
	case err := <-done:
		t.Logf("prometheus agent should not be running: %v", err)
		actualExitStatus = prom.ProcessState.ExitCode()
	case <-time.After(startupTime):
		prom.Process.Kill()
	}
	require.Equal(t, 2, actualExitStatus)
}

func TestModeSpecificFlags(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	testcases := []struct {
		mode       string
		arg        string
		exitStatus int
	}{
		{"agent", "--storage.agent.path", 0},
		{"server", "--storage.tsdb.path", 0},
		{"server", "--storage.agent.path", 3},
		{"agent", "--storage.tsdb.path", 3},
	}

	for _, tc := range testcases {
		t.Run(fmt.Sprintf("%s mode with option %s", tc.mode, tc.arg), func(t *testing.T) {
			args := []string{"-test.main", tc.arg, t.TempDir(), "--web.listen-address=0.0.0.0:0"}

			if tc.mode == "agent" {
				args = append(args, "--enable-feature=agent", "--config.file="+agentConfig)
			} else {
				args = append(args, "--config.file="+promConfig)
			}

			prom := exec.Command(promPath, args...)

			// Log stderr in case of failure.
			stderr, err := prom.StderrPipe()
			require.NoError(t, err)
			go func() {
				slurp, _ := io.ReadAll(stderr)
				t.Log(string(slurp))
			}()

			err = prom.Start()
			require.NoError(t, err)

			if tc.exitStatus == 0 {
				done := make(chan error, 1)
				go func() { done <- prom.Wait() }()
				select {
				case err := <-done:
					t.Errorf("prometheus should be still running: %v", err)
				case <-time.After(startupTime):
					prom.Process.Kill()
					<-done
				}
				return
			}

			err = prom.Wait()
			require.Error(t, err)
			var exitError *exec.ExitError
			if errors.As(err, &exitError) {
				status := exitError.Sys().(syscall.WaitStatus)
				require.Equal(t, tc.exitStatus, status.ExitStatus())
			} else {
				t.Errorf("unable to retrieve the exit status for prometheus: %v", err)
			}
		})
	}
}

func TestDocumentation(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.SkipNow()
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	cmd := exec.CommandContext(ctx, promPath, "-test.main", "--write-documentation")

	var stdout bytes.Buffer
	cmd.Stdout = &stdout

	if err := cmd.Run(); err != nil {
		if exitError, ok := err.(*exec.ExitError); ok {
			if exitError.ExitCode() != 0 {
				fmt.Println("Command failed with non-zero exit code")
			}
		}
	}

	generatedContent := strings.ReplaceAll(stdout.String(), filepath.Base(promPath), strings.TrimSuffix(filepath.Base(promPath), ".test"))

	expectedContent, err := os.ReadFile(filepath.Join("..", "..", "docs", "command-line", "prometheus.md"))
	require.NoError(t, err)

	require.Equal(t, string(expectedContent), generatedContent, "Generated content does not match documentation. Hint: run `make cli-documentation`.")
}
cmd/prometheus/main_unix_test.go

@ -11,16 +11,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
//go:build !windows
// +build !windows

package main

import (
	"fmt"
	"net/http"
	"os"
	"os/exec"
	"testing"
	"time"

	"github.com/prometheus/prometheus/util/testutil"
)

// As soon as prometheus starts responding to http request it should be able to

@ -30,14 +34,15 @@ func TestStartupInterrupt(t *testing.T) {
		t.Skip("skipping test in short mode.")
	}

	port := fmt.Sprintf(":%d", testutil.RandomUnprivilegedPort(t))

	prom := exec.Command(promPath, "-test.main", "--config.file="+promConfig, "--storage.tsdb.path="+t.TempDir(), "--web.listen-address=0.0.0.0"+port)
	err := prom.Start()
	if err != nil {
		t.Fatalf("execution error: %v", err)
	}

	done := make(chan error, 1)
	go func() {
		done <- prom.Wait()
	}()

@ -45,11 +50,13 @@ func TestStartupInterrupt(t *testing.T) {
	var startedOk bool
	var stoppedErr error

	url := "http://localhost" + port + "/graph"

Loop:
	for x := 0; x < 10; x++ {
		// error=nil means prometheus has started, so we can send the interrupt
		// signal and wait for the graceful shutdown.
		if _, err := http.Get(url); err == nil {
			startedOk = true
			prom.Process.Signal(os.Interrupt)
			select {

@ -63,12 +70,13 @@ Loop:
	}

	if !startedOk {
		t.Fatal("prometheus didn't start in the specified timeout")
	}
	switch err := prom.Process.Kill(); {
	case err == nil:
		t.Errorf("prometheus didn't shutdown gracefully after sending the Interrupt signal")
	case stoppedErr != nil && stoppedErr.Error() != "signal: interrupt":
		// TODO: find a better way to detect when the process didn't exit as expected!
		t.Errorf("prometheus exited with an unexpected error: %v", stoppedErr)
	}
}
@ -17,7 +17,7 @@ import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
@ -26,9 +26,12 @@ import (
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"github.com/prometheus/prometheus/util/testutil"
|
"github.com/prometheus/prometheus/util/testutil"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -81,22 +84,22 @@ func (p *queryLogTest) waitForPrometheus() error {
|
||||||
// then reloads the configuration if needed.
|
// then reloads the configuration if needed.
|
||||||
func (p *queryLogTest) setQueryLog(t *testing.T, queryLogFile string) {
|
func (p *queryLogTest) setQueryLog(t *testing.T, queryLogFile string) {
|
||||||
err := p.configFile.Truncate(0)
|
err := p.configFile.Truncate(0)
|
||||||
testutil.Ok(t, err)
|
require.NoError(t, err)
|
||||||
_, err = p.configFile.Seek(0, 0)
|
_, err = p.configFile.Seek(0, 0)
|
||||||
testutil.Ok(t, err)
|
require.NoError(t, err)
|
||||||
if queryLogFile != "" {
|
if queryLogFile != "" {
|
||||||
_, err = p.configFile.Write([]byte(fmt.Sprintf("global:\n query_log_file: %s\n", queryLogFile)))
|
_, err = p.configFile.Write([]byte(fmt.Sprintf("global:\n query_log_file: %s\n", queryLogFile)))
|
||||||
testutil.Ok(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
_, err = p.configFile.Write([]byte(p.configuration()))
|
_, err = p.configFile.Write([]byte(p.configuration()))
|
||||||
testutil.Ok(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// reloadConfig reloads the configuration using POST.
|
// reloadConfig reloads the configuration using POST.
|
||||||
func (p *queryLogTest) reloadConfig(t *testing.T) {
|
func (p *queryLogTest) reloadConfig(t *testing.T) {
|
||||||
r, err := http.Post(fmt.Sprintf("http://%s:%d%s/-/reload", p.host, p.port, p.prefix), "text/plain", nil)
|
r, err := http.Post(fmt.Sprintf("http://%s:%d%s/-/reload", p.host, p.port, p.prefix), "text/plain", nil)
|
||||||
testutil.Ok(t, err)
|
require.NoError(t, err)
|
||||||
testutil.Equals(t, 200, r.StatusCode)
|
require.Equal(t, 200, r.StatusCode)
|
||||||
}
|
}
|
||||||
|
|
||||||
// query runs a query according to the test origin.
|
// query runs a query according to the test origin.
|
||||||
|
@ -104,14 +107,14 @@ func (p *queryLogTest) query(t *testing.T) {
|
||||||
switch p.origin {
|
switch p.origin {
|
||||||
case apiOrigin:
|
case apiOrigin:
|
||||||
r, err := http.Get(fmt.Sprintf(
|
r, err := http.Get(fmt.Sprintf(
|
||||||
"http://%s:%d%s/api/v1/query?query=%s",
|
"http://%s:%d%s/api/v1/query_range?step=5&start=0&end=3600&query=%s",
|
||||||
p.host,
|
p.host,
|
||||||
p.port,
|
p.port,
|
||||||
p.prefix,
|
p.prefix,
|
||||||
url.QueryEscape("query_with_api"),
|
url.QueryEscape("query_with_api"),
|
||||||
))
|
))
|
||||||
testutil.Ok(t, err)
|
require.NoError(t, err)
|
||||||
testutil.Equals(t, 200, r.StatusCode)
|
require.Equal(t, 200, r.StatusCode)
|
||||||
case consoleOrigin:
|
case consoleOrigin:
|
||||||
r, err := http.Get(fmt.Sprintf(
|
r, err := http.Get(fmt.Sprintf(
|
||||||
"http://%s:%d%s/consoles/test.html",
|
"http://%s:%d%s/consoles/test.html",
|
||||||
|
@ -119,8 +122,8 @@ func (p *queryLogTest) query(t *testing.T) {
|
||||||
p.port,
|
p.port,
|
||||||
p.prefix,
|
p.prefix,
|
||||||
))
|
))
|
||||||
testutil.Ok(t, err)
|
require.NoError(t, err)
|
||||||
testutil.Equals(t, 200, r.StatusCode)
|
require.Equal(t, 200, r.StatusCode)
|
||||||
case ruleOrigin:
|
case ruleOrigin:
|
||||||
time.Sleep(2 * time.Second)
|
time.Sleep(2 * time.Second)
|
||||||
default:
|
default:
|
||||||
|
@ -146,25 +149,33 @@ func (p *queryLogTest) queryString() string {
|
||||||
// test parameters.
|
// test parameters.
|
||||||
func (p *queryLogTest) validateLastQuery(t *testing.T, ql []queryLogLine) {
|
func (p *queryLogTest) validateLastQuery(t *testing.T, ql []queryLogLine) {
|
||||||
q := ql[len(ql)-1]
|
q := ql[len(ql)-1]
|
||||||
testutil.Equals(t, p.queryString(), q.Params.Query)
|
require.Equal(t, p.queryString(), q.Params.Query)
|
||||||
testutil.Equals(t, 0, q.Params.Step)
|
|
||||||
|
switch p.origin {
|
||||||
|
case apiOrigin:
|
||||||
|
require.Equal(t, 5, q.Params.Step)
|
||||||
|
require.Equal(t, "1970-01-01T00:00:00.000Z", q.Params.Start)
|
||||||
|
require.Equal(t, "1970-01-01T01:00:00.000Z", q.Params.End)
|
||||||
|
default:
|
||||||
|
require.Equal(t, 0, q.Params.Step)
|
||||||
|
}
|
||||||
|
|
||||||
if p.origin != ruleOrigin {
|
if p.origin != ruleOrigin {
|
||||||
host := p.host
|
host := p.host
|
||||||
if host == "[::1]" {
|
if host == "[::1]" {
|
||||||
host = "::1"
|
host = "::1"
|
||||||
}
|
}
|
||||||
testutil.Equals(t, host, q.Request.ClientIP)
|
require.Equal(t, host, q.Request.ClientIP)
|
||||||
}
|
}
|
||||||
|
|
||||||
switch p.origin {
|
switch p.origin {
|
||||||
case apiOrigin:
|
case apiOrigin:
|
||||||
testutil.Equals(t, p.prefix+"/api/v1/query", q.Request.Path)
|
require.Equal(t, p.prefix+"/api/v1/query_range", q.Request.Path)
|
||||||
case consoleOrigin:
|
case consoleOrigin:
|
||||||
testutil.Equals(t, p.prefix+"/consoles/test.html", q.Request.Path)
|
require.Equal(t, p.prefix+"/consoles/test.html", q.Request.Path)
|
||||||
case ruleOrigin:
|
case ruleOrigin:
|
||||||
testutil.Equals(t, "querylogtest", q.RuleGroup.Name)
|
require.Equal(t, "querylogtest", q.RuleGroup.Name)
|
||||||
testutil.Equals(t, filepath.Join(p.cwd, "testdata", "rules", "test.yml"), q.RuleGroup.File)
|
require.Equal(t, filepath.Join(p.cwd, "testdata", "rules", "test.yml"), q.RuleGroup.File)
|
||||||
default:
|
default:
|
||||||
panic("unknown origin")
|
panic("unknown origin")
|
||||||
}
|
}
|
||||||
|
@ -182,7 +193,7 @@ func (p *queryLogTest) String() string {
|
||||||
}
|
}
|
||||||
name = name + ", " + p.host + ":" + strconv.Itoa(p.port)
|
name = name + ", " + p.host + ":" + strconv.Itoa(p.port)
|
||||||
if p.enabledAtStart {
|
if p.enabledAtStart {
|
||||||
name = name + ", enabled at start"
|
name += ", enabled at start"
|
||||||
}
|
}
|
||||||
if p.prefix != "" {
|
if p.prefix != "" {
|
||||||
name = name + ", with prefix " + p.prefix
|
name = name + ", with prefix " + p.prefix
|
||||||
|
@ -213,7 +224,7 @@ func (p *queryLogTest) configuration() string {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// exactQueryCount returns wheter we can match an exact query count. False on
|
// exactQueryCount returns whether we can match an exact query count. False on
|
||||||
// recording rules are they are regular time intervals.
|
// recording rules are they are regular time intervals.
|
||||||
func (p *queryLogTest) exactQueryCount() bool {
|
func (p *queryLogTest) exactQueryCount() bool {
|
||||||
return p.origin != ruleOrigin
|
return p.origin != ruleOrigin
|
||||||
|
@ -224,11 +235,11 @@ func (p *queryLogTest) run(t *testing.T) {
|
||||||
p.skip(t)
|
p.skip(t)
|
||||||
|
|
||||||
// Setup temporary files for this test.
|
// Setup temporary files for this test.
|
||||||
queryLogFile, err := ioutil.TempFile("", "query")
|
queryLogFile, err := os.CreateTemp("", "query")
|
||||||
testutil.Ok(t, err)
|
require.NoError(t, err)
|
||||||
defer os.Remove(queryLogFile.Name())
|
defer os.Remove(queryLogFile.Name())
|
||||||
p.configFile, err = ioutil.TempFile("", "config")
|
p.configFile, err = os.CreateTemp("", "config")
|
||||||
testutil.Ok(t, err)
|
require.NoError(t, err)
|
||||||
defer os.Remove(p.configFile.Name())
|
defer os.Remove(p.configFile.Name())
|
||||||
|
|
||||||
if p.enabledAtStart {
|
if p.enabledAtStart {
|
||||||
|
@ -237,29 +248,43 @@ func (p *queryLogTest) run(t *testing.T) {
|
||||||
p.setQueryLog(t, "")
|
p.setQueryLog(t, "")
|
||||||
}
|
}
|
||||||
|
|
||||||
params := append([]string{"-test.main", "--config.file=" + p.configFile.Name(), "--web.enable-lifecycle", fmt.Sprintf("--web.listen-address=%s:%d", p.host, p.port)}, p.params()...)
|
dir := t.TempDir()
|
||||||
|
|
||||||
|
params := append([]string{
|
||||||
|
"-test.main",
|
||||||
|
"--config.file=" + p.configFile.Name(),
|
||||||
|
"--web.enable-lifecycle",
|
||||||
|
fmt.Sprintf("--web.listen-address=%s:%d", p.host, p.port),
|
||||||
|
"--storage.tsdb.path=" + dir,
|
||||||
|
}, p.params()...)
|
||||||
|
|
||||||
prom := exec.Command(promPath, params...)
|
prom := exec.Command(promPath, params...)
|
||||||
|
|
||||||
// Log stderr in case of failure.
|
// Log stderr in case of failure.
|
||||||
stderr, err := prom.StderrPipe()
|
stderr, err := prom.StderrPipe()
|
||||||
testutil.Ok(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// We use a WaitGroup to avoid calling t.Log after the test is done.
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(1)
|
||||||
|
defer wg.Wait()
|
||||||
go func() {
|
go func() {
|
||||||
slurp, _ := ioutil.ReadAll(stderr)
|
slurp, _ := io.ReadAll(stderr)
|
||||||
t.Log(string(slurp))
|
t.Log(string(slurp))
|
||||||
|
wg.Done()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
testutil.Ok(t, prom.Start())
|
require.NoError(t, prom.Start())
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
prom.Process.Kill()
|
prom.Process.Kill()
|
||||||
prom.Wait()
|
prom.Wait()
|
||||||
}()
|
}()
|
||||||
testutil.Ok(t, p.waitForPrometheus())
|
require.NoError(t, p.waitForPrometheus())
|
||||||
|
|
||||||
if !p.enabledAtStart {
|
if !p.enabledAtStart {
|
||||||
p.query(t)
|
p.query(t)
|
||||||
testutil.Equals(t, 0, len(readQueryLog(t, queryLogFile.Name())))
|
require.Equal(t, 0, len(readQueryLog(t, queryLogFile.Name())))
|
||||||
p.setQueryLog(t, queryLogFile.Name())
|
p.setQueryLog(t, queryLogFile.Name())
|
||||||
p.reloadConfig(t)
|
p.reloadConfig(t)
|
||||||
}
|
}
|
||||||
|
@ -269,9 +294,9 @@ func (p *queryLogTest) run(t *testing.T) {
|
||||||
ql := readQueryLog(t, queryLogFile.Name())
|
ql := readQueryLog(t, queryLogFile.Name())
|
||||||
qc := len(ql)
|
qc := len(ql)
|
||||||
if p.exactQueryCount() {
|
if p.exactQueryCount() {
|
||||||
testutil.Equals(t, 1, qc)
|
require.Equal(t, 1, qc)
|
||||||
} else {
|
} else {
|
||||||
testutil.Assert(t, qc > 0, "no queries logged")
|
require.Greater(t, qc, 0, "no queries logged")
|
||||||
}
|
}
|
||||||
p.validateLastQuery(t, ql)
|
p.validateLastQuery(t, ql)
|
||||||
|
|
||||||
|
@ -284,7 +309,7 @@ func (p *queryLogTest) run(t *testing.T) {
|
||||||
p.query(t)
|
p.query(t)
|
||||||
|
|
||||||
ql = readQueryLog(t, queryLogFile.Name())
|
ql = readQueryLog(t, queryLogFile.Name())
|
||||||
testutil.Equals(t, qc, len(ql))
|
require.Equal(t, qc, len(ql))
|
||||||
|
|
 	qc = len(ql)
 	p.setQueryLog(t, queryLogFile.Name())

@@ -295,9 +320,9 @@ func (p *queryLogTest) run(t *testing.T) {

 	ql = readQueryLog(t, queryLogFile.Name())
 	if p.exactQueryCount() {
-		testutil.Equals(t, qc, len(ql))
+		require.Equal(t, qc, len(ql))
 	} else {
-		testutil.Assert(t, len(ql) > qc, "no queries logged")
+		require.Greater(t, len(ql), qc, "no queries logged")
 	}
 	p.validateLastQuery(t, ql)
 	qc = len(ql)

@@ -308,14 +333,14 @@ func (p *queryLogTest) run(t *testing.T) {
 		return
 	}
 	// Move the file, Prometheus should still write to the old file.
-	newFile, err := ioutil.TempFile("", "newLoc")
-	testutil.Ok(t, err)
-	testutil.Ok(t, newFile.Close())
+	newFile, err := os.CreateTemp("", "newLoc")
+	require.NoError(t, err)
+	require.NoError(t, newFile.Close())
 	defer os.Remove(newFile.Name())
-	testutil.Ok(t, os.Rename(queryLogFile.Name(), newFile.Name()))
+	require.NoError(t, os.Rename(queryLogFile.Name(), newFile.Name()))
 	ql = readQueryLog(t, newFile.Name())
 	if p.exactQueryCount() {
-		testutil.Equals(t, qc, len(ql))
+		require.Equal(t, qc, len(ql))
 	}
 	p.validateLastQuery(t, ql)
 	qc = len(ql)

@@ -326,9 +351,9 @@ func (p *queryLogTest) run(t *testing.T) {

 	ql = readQueryLog(t, newFile.Name())
 	if p.exactQueryCount() {
-		testutil.Equals(t, qc, len(ql))
+		require.Equal(t, qc, len(ql))
 	} else {
-		testutil.Assert(t, len(ql) > qc, "no queries logged")
+		require.Greater(t, len(ql), qc, "no queries logged")
 	}
 	p.validateLastQuery(t, ql)

@@ -339,9 +364,9 @@ func (p *queryLogTest) run(t *testing.T) {
 	ql = readQueryLog(t, queryLogFile.Name())
 	qc = len(ql)
 	if p.exactQueryCount() {
-		testutil.Equals(t, 1, qc)
+		require.Equal(t, 1, qc)
 	} else {
-		testutil.Assert(t, qc > 0, "no queries logged")
+		require.Greater(t, qc, 0, "no queries logged")
 	}
 }

@@ -349,6 +374,8 @@ type queryLogLine struct {
 	Params struct {
 		Query string `json:"query"`
 		Step  int    `json:"step"`
+		Start string `json:"start"`
+		End   string `json:"end"`
 	} `json:"params"`
 	Request struct {
 		Path string `json:"path"`

@@ -364,12 +391,12 @@ type queryLogLine struct {
 func readQueryLog(t *testing.T, path string) []queryLogLine {
 	ql := []queryLogLine{}
 	file, err := os.Open(path)
-	testutil.Ok(t, err)
+	require.NoError(t, err)
 	defer file.Close()
 	scanner := bufio.NewScanner(file)
 	for scanner.Scan() {
 		var q queryLogLine
-		testutil.Ok(t, json.Unmarshal(scanner.Bytes(), &q))
+		require.NoError(t, json.Unmarshal(scanner.Bytes(), &q))
 		ql = append(ql, q)
 	}
 	return ql

@@ -381,9 +408,8 @@ func TestQueryLog(t *testing.T) {
 	}

 	cwd, err := os.Getwd()
-	testutil.Ok(t, err)
+	require.NoError(t, err)

-	port := 15000
 	for _, host := range []string{"127.0.0.1", "[::1]"} {
 		for _, prefix := range []string{"", "/foobar"} {
 			for _, enabledAtStart := range []bool{true, false} {

@@ -393,7 +419,7 @@ func TestQueryLog(t *testing.T) {
 					host:           host,
 					enabledAtStart: enabledAtStart,
 					prefix:         prefix,
-					port:           port,
+					port:           testutil.RandomUnprivilegedPort(t),
 					cwd:            cwd,
 				}

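The hunks above are a mechanical migration from the old internal testutil helpers to testify's require package. A minimal sketch of the correspondence, assuming a *testing.T in scope (this test function and its values are illustrative, not part of the diff):

package main

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestAssertionMapping illustrates the pattern used throughout this commit:
//   testutil.Ok(t, err)            -> require.NoError(t, err)
//   testutil.Equals(t, want, got)  -> require.Equal(t, want, got)
//   testutil.Assert(t, a > b, msg) -> require.Greater(t, a, b, msg)
func TestAssertionMapping(t *testing.T) {
	got, want := 3, 3
	require.Equal(t, want, got)             // replaces testutil.Equals
	require.Greater(t, got, 1, "too small") // replaces testutil.Assert with a > comparison
	require.NoError(t, error(nil))          // replaces testutil.Ok
}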
@@ -16,12 +16,11 @@ package main
 import (
 	"archive/tar"
 	"compress/gzip"
+	"fmt"
 	"os"
-
-	"github.com/pkg/errors"
 )

-const filePerm = 0644
+const filePerm = 0o666

 type tarGzFileWriter struct {
 	tarWriter *tar.Writer

@@ -32,7 +31,7 @@ type tarGzFileWriter struct {
 func newTarGzFileWriter(archiveName string) (*tarGzFileWriter, error) {
 	file, err := os.Create(archiveName)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error creating archive %q", archiveName)
+		return nil, fmt.Errorf("error creating archive %q: %w", archiveName, err)
 	}
 	gzw := gzip.NewWriter(file)
 	tw := tar.NewWriter(gzw)

cmd/promtool/backfill.go (new file, 228 lines)
@@ -0,0 +1,228 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"time"
+
+	"github.com/go-kit/log"
+
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/textparse"
+	"github.com/prometheus/prometheus/tsdb"
+	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+)
+
+func getMinAndMaxTimestamps(p textparse.Parser) (int64, int64, error) {
+	var maxt, mint int64 = math.MinInt64, math.MaxInt64
+
+	for {
+		entry, err := p.Next()
+		if errors.Is(err, io.EOF) {
+			break
+		}
+		if err != nil {
+			return 0, 0, fmt.Errorf("next: %w", err)
+		}
+
+		if entry != textparse.EntrySeries {
+			continue
+		}
+
+		_, ts, _ := p.Series()
+		if ts == nil {
+			return 0, 0, fmt.Errorf("expected timestamp for series got none")
+		}
+
+		if *ts > maxt {
+			maxt = *ts
+		}
+		if *ts < mint {
+			mint = *ts
+		}
+	}
+
+	if maxt == math.MinInt64 {
+		maxt = 0
+	}
+	if mint == math.MaxInt64 {
+		mint = 0
+	}
+
+	return maxt, mint, nil
+}
+
+func getCompatibleBlockDuration(maxBlockDuration int64) int64 {
+	blockDuration := tsdb.DefaultBlockDuration
+	if maxBlockDuration > tsdb.DefaultBlockDuration {
+		ranges := tsdb.ExponentialBlockRanges(tsdb.DefaultBlockDuration, 10, 3)
+		idx := len(ranges) - 1 // Use largest range if user asked for something enormous.
+		for i, v := range ranges {
+			if v > maxBlockDuration {
+				idx = i - 1
+				break
+			}
+		}
+		blockDuration = ranges[idx]
+	}
+	return blockDuration
+}
+
+func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesInAppender int, outputDir string, humanReadable, quiet bool) (returnErr error) {
+	blockDuration := getCompatibleBlockDuration(maxBlockDuration)
+	mint = blockDuration * (mint / blockDuration)
+
+	db, err := tsdb.OpenDBReadOnly(outputDir, nil)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		returnErr = tsdb_errors.NewMulti(returnErr, db.Close()).Err()
+	}()
+
+	var (
+		wroteHeader  bool
+		nextSampleTs int64 = math.MaxInt64
+	)
+
+	for t := mint; t <= maxt; t += blockDuration {
+		tsUpper := t + blockDuration
+		if nextSampleTs != math.MaxInt64 && nextSampleTs >= tsUpper {
+			// The next sample is not in this timerange, we can avoid parsing
+			// the file for this timerange.
+			continue
+		}
+		nextSampleTs = math.MaxInt64
+
+		err := func() error {
+			// To prevent races with compaction, a block writer only allows appending samples
+			// that are at most half a block size older than the most recent sample appended so far.
+			// However, in the way we use the block writer here, compaction doesn't happen, while we
+			// also need to append samples throughout the whole block range. To allow that, we
+			// pretend that the block is twice as large here, but only really add sample in the
+			// original interval later.
+			w, err := tsdb.NewBlockWriter(log.NewNopLogger(), outputDir, 2*blockDuration)
+			if err != nil {
+				return fmt.Errorf("block writer: %w", err)
+			}
+			defer func() {
+				err = tsdb_errors.NewMulti(err, w.Close()).Err()
+			}()
+
+			ctx := context.Background()
+			app := w.Appender(ctx)
+			p := textparse.NewOpenMetricsParser(input)
+			samplesCount := 0
+			for {
+				e, err := p.Next()
+				if errors.Is(err, io.EOF) {
+					break
+				}
+				if err != nil {
+					return fmt.Errorf("parse: %w", err)
+				}
+				if e != textparse.EntrySeries {
+					continue
+				}
+
+				_, ts, v := p.Series()
+				if ts == nil {
+					l := labels.Labels{}
+					p.Metric(&l)
+					return fmt.Errorf("expected timestamp for series %v, got none", l)
+				}
+				if *ts < t {
+					continue
+				}
+				if *ts >= tsUpper {
+					if *ts < nextSampleTs {
+						nextSampleTs = *ts
+					}
+					continue
+				}
+
+				l := labels.Labels{}
+				p.Metric(&l)
+
+				if _, err := app.Append(0, l, *ts, v); err != nil {
+					return fmt.Errorf("add sample: %w", err)
+				}
+
+				samplesCount++
+				if samplesCount < maxSamplesInAppender {
+					continue
+				}
+
+				// If we arrive here, the samples count is greater than the maxSamplesInAppender.
+				// Therefore the old appender is committed and a new one is created.
+				// This prevents keeping too many samples lined up in an appender and thus in RAM.
+				if err := app.Commit(); err != nil {
+					return fmt.Errorf("commit: %w", err)
+				}
+
+				app = w.Appender(ctx)
+				samplesCount = 0
+			}
+
+			if err := app.Commit(); err != nil {
+				return fmt.Errorf("commit: %w", err)
+			}
+
+			block, err := w.Flush(ctx)
+			switch {
+			case err == nil:
+				if quiet {
+					break
+				}
+				blocks, err := db.Blocks()
+				if err != nil {
+					return fmt.Errorf("get blocks: %w", err)
+				}
+				for _, b := range blocks {
+					if b.Meta().ULID == block {
+						printBlocks([]tsdb.BlockReader{b}, !wroteHeader, humanReadable)
+						wroteHeader = true
+						break
+					}
+				}
+			case errors.Is(err, tsdb.ErrNoSeriesAppended):
+			default:
+				return fmt.Errorf("flush: %w", err)
+			}
+
+			return nil
+		}()
+		if err != nil {
+			return fmt.Errorf("process blocks: %w", err)
+		}
+	}
+	return nil
+}
+
+func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) (err error) {
+	p := textparse.NewOpenMetricsParser(input)
+	maxt, mint, err := getMinAndMaxTimestamps(p)
+	if err != nil {
+		return fmt.Errorf("getting min and max timestamp: %w", err)
+	}
+	if err = createBlocks(input, mint, maxt, int64(maxBlockDuration/time.Millisecond), maxSamplesInAppender, outputDir, humanReadable, quiet); err != nil {
+		return fmt.Errorf("block creation: %w", err)
+	}
+	return nil
+}
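For context, the new backfill entry point takes a raw OpenMetrics payload and writes TSDB blocks under an output directory. A minimal sketch of how it could be driven, assuming the functions in the file above; the input path and argument values here are illustrative, not part of the diff:

package main

import (
	"log"
	"os"
	"time"
)

// runBackfillExample is a hypothetical driver for the backfill function above.
func runBackfillExample() {
	// Assumed input: an OpenMetrics dump ending in "# EOF".
	input, err := os.ReadFile("data.om") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	// 5000 samples per appender batch, human-readable block listing,
	// non-quiet output, and a 2h cap on the block duration.
	if err := backfill(5000, input, "./out", true, false, 2*time.Hour); err != nil {
		log.Fatal(err)
	}
}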
cmd/promtool/backfill_test.go (new file, 710 lines)
@@ -0,0 +1,710 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"context"
+	"math"
+	"sort"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/tsdb"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+)
+
+type backfillSample struct {
+	Timestamp int64
+	Value     float64
+	Labels    labels.Labels
+}
+
+func sortSamples(samples []backfillSample) {
+	sort.Slice(samples, func(x, y int) bool {
+		sx, sy := samples[x], samples[y]
+		if sx.Timestamp != sy.Timestamp {
+			return sx.Timestamp < sy.Timestamp
+		}
+		return sx.Value < sy.Value
+	})
+}
+
+func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample { // nolint:revive
+	ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
+	samples := []backfillSample{}
+	for ss.Next() {
+		series := ss.At()
+		it := series.Iterator(nil)
+		require.NoError(t, it.Err())
+		for it.Next() == chunkenc.ValFloat {
+			ts, v := it.At()
+			samples = append(samples, backfillSample{Timestamp: ts, Value: v, Labels: series.Labels()})
+		}
+	}
+	return samples
+}
+
+func testBlocks(t *testing.T, db *tsdb.DB, expectedMinTime, expectedMaxTime, expectedBlockDuration int64, expectedSamples []backfillSample, expectedNumBlocks int) {
+	blocks := db.Blocks()
+	require.Equal(t, expectedNumBlocks, len(blocks), "did not create correct number of blocks")
+
+	for i, block := range blocks {
+		require.Equal(t, block.MinTime()/expectedBlockDuration, (block.MaxTime()-1)/expectedBlockDuration, "block %d contains data outside of one aligned block duration", i)
+	}
+
+	q, err := db.Querier(math.MinInt64, math.MaxInt64)
+	require.NoError(t, err)
+	defer func() {
+		require.NoError(t, q.Close())
+	}()
+
+	allSamples := queryAllSeries(t, q, expectedMinTime, expectedMaxTime)
+	sortSamples(allSamples)
+	sortSamples(expectedSamples)
+	require.Equal(t, expectedSamples, allSamples, "did not create correct samples")
+
+	if len(allSamples) > 0 {
+		require.Equal(t, expectedMinTime, allSamples[0].Timestamp, "timestamp of first sample is not the expected minimum time")
+		require.Equal(t, expectedMaxTime, allSamples[len(allSamples)-1].Timestamp, "timestamp of last sample is not the expected maximum time")
+	}
+}
+
+func TestBackfill(t *testing.T) {
+	tests := []struct {
+		ToParse              string
+		IsOk                 bool
+		Description          string
+		MaxSamplesInAppender int
+		MaxBlockDuration     time.Duration
+		Expected             struct {
+			MinTime       int64
+			MaxTime       int64
+			NumBlocks     int
+			BlockDuration int64
+			Samples       []backfillSample
+		}
+	}{
+		{
+			ToParse:              `# EOF`,
+			IsOk:                 true,
+			Description:          "Empty file.",
+			MaxSamplesInAppender: 5000,
+			Expected: struct {
+				MinTime       int64
+				MaxTime       int64
+				NumBlocks     int
+				BlockDuration int64
+				Samples       []backfillSample
+			}{
+				MinTime:       math.MaxInt64,
+				MaxTime:       math.MinInt64,
+				NumBlocks:     0,
+				BlockDuration: tsdb.DefaultBlockDuration,
+				Samples:       []backfillSample{},
+			},
+		},
+		{
+			ToParse: `# HELP http_requests_total The total number of HTTP requests.
+# TYPE http_requests_total counter
+http_requests_total{code="200"} 1021 1565133713.989
+http_requests_total{code="400"} 1 1565133713.990
+# EOF
+`,
+			IsOk:                 true,
+			Description:          "Multiple samples with different timestamp for different series.",
+			MaxSamplesInAppender: 5000,
+			Expected: struct {
+				MinTime       int64
+				MaxTime       int64
+				NumBlocks     int
+				BlockDuration int64
+				Samples       []backfillSample
+			}{
+				MinTime:       1565133713989,
+				MaxTime:       1565133713990,
+				NumBlocks:     1,
+				BlockDuration: tsdb.DefaultBlockDuration,
+				Samples: []backfillSample{
+					{
+						Timestamp: 1565133713989,
+						Value:     1021,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+					{
+						Timestamp: 1565133713990,
+						Value:     1,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "400"),
+					},
+				},
+			},
+		},
+		{
+			ToParse: `# HELP http_requests_total The total number of HTTP requests.
+# TYPE http_requests_total counter
+http_requests_total{code="200"} 1021 1565133713.989
+http_requests_total{code="200"} 1022 1565392913.989
+http_requests_total{code="200"} 1023 1565652113.989
+# EOF
+`,
+			IsOk:                 true,
+			Description:          "Multiple samples separated by 3 days.",
+			MaxSamplesInAppender: 5000,
+			Expected: struct {
+				MinTime       int64
+				MaxTime       int64
+				NumBlocks     int
+				BlockDuration int64
+				Samples       []backfillSample
+			}{
+				MinTime:       1565133713989,
+				MaxTime:       1565652113989,
+				NumBlocks:     3,
+				BlockDuration: tsdb.DefaultBlockDuration,
+				Samples: []backfillSample{
+					{
+						Timestamp: 1565133713989,
+						Value:     1021,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+					{
+						Timestamp: 1565392913989,
+						Value:     1022,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+					{
+						Timestamp: 1565652113989,
+						Value:     1023,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+				},
+			},
+		},
+		{
+			ToParse: `# TYPE go info
+go_info{version="go1.15.3"} 1 1565392913.989
+# TYPE http_requests_total counter
+http_requests_total{code="200"} 1021 1565133713.989
+# EOF
+`,
+			IsOk:                 true,
+			Description:          "Unordered samples from multiple series, which end in different blocks.",
+			MaxSamplesInAppender: 5000,
+			Expected: struct {
+				MinTime       int64
+				MaxTime       int64
+				NumBlocks     int
+				BlockDuration int64
+				Samples       []backfillSample
+			}{
+				MinTime:       1565133713989,
+				MaxTime:       1565392913989,
+				NumBlocks:     2,
+				BlockDuration: tsdb.DefaultBlockDuration,
+				Samples: []backfillSample{
+					{
+						Timestamp: 1565133713989,
+						Value:     1021,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+					{
+						Timestamp: 1565392913989,
+						Value:     1,
+						Labels:    labels.FromStrings("__name__", "go_info", "version", "go1.15.3"),
+					},
+				},
+			},
+		},
+		{
+			ToParse: `# HELP http_requests_total The total number of HTTP requests.
+# TYPE http_requests_total counter
+http_requests_total{code="200"} 1021 1565133713.989
+http_requests_total{code="200"} 1 1565133714.989
+http_requests_total{code="400"} 2 1565133715.989
+# EOF
+`,
+			IsOk:                 true,
+			Description:          "Multiple samples with different timestamp for the same series.",
+			MaxSamplesInAppender: 5000,
+			Expected: struct {
+				MinTime       int64
+				MaxTime       int64
+				NumBlocks     int
+				BlockDuration int64
+				Samples       []backfillSample
+			}{
+				MinTime:       1565133713989,
+				MaxTime:       1565133715989,
+				NumBlocks:     1,
+				BlockDuration: tsdb.DefaultBlockDuration,
+				Samples: []backfillSample{
+					{
+						Timestamp: 1565133713989,
+						Value:     1021,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+					{
+						Timestamp: 1565133714989,
+						Value:     1,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+					{
+						Timestamp: 1565133715989,
+						Value:     2,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "400"),
+					},
+				},
+			},
+		},
+		{
+			ToParse: `# HELP http_requests_total The total number of HTTP requests.
+# TYPE http_requests_total counter
+http_requests_total{code="200"} 1021 1624463088.000
+http_requests_total{code="200"} 1 1627055153.000
+http_requests_total{code="400"} 2 1627056153.000
+# EOF
+`,
+			IsOk:                 true,
+			Description:          "Long maximum block duration puts all data into one block.",
+			MaxSamplesInAppender: 5000,
+			MaxBlockDuration:     2048 * time.Hour,
+			Expected: struct {
+				MinTime       int64
+				MaxTime       int64
+				NumBlocks     int
+				BlockDuration int64
+				Samples       []backfillSample
+			}{
+				MinTime:       1624463088000,
+				MaxTime:       1627056153000,
+				NumBlocks:     1,
+				BlockDuration: int64(1458 * time.Hour / time.Millisecond),
+				Samples: []backfillSample{
+					{
+						Timestamp: 1624463088000,
+						Value:     1021,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+					{
+						Timestamp: 1627055153000,
+						Value:     1,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+					{
+						Timestamp: 1627056153000,
+						Value:     2,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "400"),
+					},
+				},
+			},
+		},
+		{
+			ToParse: `# HELP http_requests_total The total number of HTTP requests.
+# TYPE http_requests_total counter
+http_requests_total{code="200"} 1 1624463088.000
+http_requests_total{code="200"} 2 1629503088.000
+http_requests_total{code="200"} 3 1629863088.000
+# EOF
+`,
+			IsOk:                 true,
+			Description:          "Long maximum block duration puts all data into two blocks.",
+			MaxSamplesInAppender: 5000,
+			MaxBlockDuration:     2048 * time.Hour,
+			Expected: struct {
+				MinTime       int64
+				MaxTime       int64
+				NumBlocks     int
+				BlockDuration int64
+				Samples       []backfillSample
+			}{
+				MinTime:       1624463088000,
+				MaxTime:       1629863088000,
+				NumBlocks:     2,
+				BlockDuration: int64(1458 * time.Hour / time.Millisecond),
+				Samples: []backfillSample{
+					{
+						Timestamp: 1624463088000,
+						Value:     1,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+					{
+						Timestamp: 1629503088000,
+						Value:     2,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+					{
+						Timestamp: 1629863088000,
+						Value:     3,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+				},
+			},
+		},
+		{
+			ToParse: `# HELP http_requests_total The total number of HTTP requests.
+# TYPE http_requests_total counter
+http_requests_total{code="200"} 1 1624463088.000
+http_requests_total{code="200"} 2 1765943088.000
+http_requests_total{code="200"} 3 1768463088.000
+# EOF
+`,
+			IsOk:                 true,
+			Description:          "Maximum block duration longer than longest possible duration, uses largest duration, puts all data into two blocks.",
+			MaxSamplesInAppender: 5000,
+			MaxBlockDuration:     200000 * time.Hour,
+			Expected: struct {
+				MinTime       int64
+				MaxTime       int64
+				NumBlocks     int
+				BlockDuration int64
+				Samples       []backfillSample
+			}{
+				MinTime:       1624463088000,
+				MaxTime:       1768463088000,
+				NumBlocks:     2,
+				BlockDuration: int64(39366 * time.Hour / time.Millisecond),
+				Samples: []backfillSample{
+					{
+						Timestamp: 1624463088000,
+						Value:     1,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+					{
+						Timestamp: 1765943088000,
+						Value:     2,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+					{
+						Timestamp: 1768463088000,
+						Value:     3,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+				},
+			},
+		},
+		{
+			ToParse: `# HELP http_requests_total The total number of HTTP requests.
+# TYPE http_requests_total counter
+http_requests_total{code="200"} 1021 1565133713.989
+http_requests_total{code="200"} 1022 1565144513.989
+http_requests_total{code="400"} 2 1565155313.989
+http_requests_total{code="400"} 1 1565166113.989
+# EOF
+`,
+			IsOk:                 true,
+			Description:          "Multiple samples that end up in different blocks.",
+			MaxSamplesInAppender: 5000,
+			Expected: struct {
+				MinTime       int64
+				MaxTime       int64
+				NumBlocks     int
+				BlockDuration int64
+				Samples       []backfillSample
+			}{
+				MinTime:       1565133713989,
+				MaxTime:       1565166113989,
+				NumBlocks:     4,
+				BlockDuration: tsdb.DefaultBlockDuration,
+				Samples: []backfillSample{
+					{
+						Timestamp: 1565133713989,
+						Value:     1021,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+					{
+						Timestamp: 1565144513989,
+						Value:     1022,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+					{
+						Timestamp: 1565155313989,
+						Value:     2,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "400"),
+					},
+					{
+						Timestamp: 1565166113989,
+						Value:     1,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "400"),
+					},
+				},
+			},
+		},
+		{
+			ToParse: `# HELP http_requests_total The total number of HTTP requests.
+# TYPE http_requests_total counter
+http_requests_total{code="200"} 1021 1565133713.989
+http_requests_total{code="200"} 1022 1565133714
+http_requests_total{code="200"} 1023 1565133716
+http_requests_total{code="200"} 1022 1565144513.989
+http_requests_total{code="400"} 2 1565155313.989
+http_requests_total{code="400"} 3 1565155314
+http_requests_total{code="400"} 1 1565166113.989
+# EOF
+`,
+			IsOk:                 true,
+			Description:          "Number of samples are greater than the sample batch size.",
+			MaxSamplesInAppender: 2,
+			Expected: struct {
+				MinTime       int64
+				MaxTime       int64
+				NumBlocks     int
+				BlockDuration int64
+				Samples       []backfillSample
+			}{
+				MinTime:       1565133713989,
+				MaxTime:       1565166113989,
+				NumBlocks:     4,
+				BlockDuration: tsdb.DefaultBlockDuration,
+				Samples: []backfillSample{
+					{
+						Timestamp: 1565133713989,
+						Value:     1021,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+					{
+						Timestamp: 1565133714000,
+						Value:     1022,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+					{
+						Timestamp: 1565133716000,
+						Value:     1023,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+					{
+						Timestamp: 1565144513989,
+						Value:     1022,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+					{
+						Timestamp: 1565155313989,
+						Value:     2,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "400"),
+					},
+					{
+						Timestamp: 1565155314000,
+						Value:     3,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "400"),
+					},
+					{
+						Timestamp: 1565166113989,
+						Value:     1,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "400"),
+					},
+				},
+			},
+		},
+		{ // For https://github.com/prometheus/prometheus/issues/8476.
+			ToParse: `# HELP http_requests_total The total number of HTTP requests.
+# TYPE http_requests_total counter
+http_requests_total{code="200"} 1021 0
+http_requests_total{code="200"} 1022 7199
+http_requests_total{code="400"} 1023 0
+http_requests_total{code="400"} 1024 7199
+# EOF
+`,
+			IsOk:                 true,
+			Description:          "One series spanning 2h in same block should not cause problems to other series.",
+			MaxSamplesInAppender: 1,
+			Expected: struct {
+				MinTime       int64
+				MaxTime       int64
+				NumBlocks     int
+				BlockDuration int64
+				Samples       []backfillSample
+			}{
+				MinTime:       0,
+				MaxTime:       7199000,
+				NumBlocks:     1,
+				BlockDuration: tsdb.DefaultBlockDuration,
+				Samples: []backfillSample{
+					{
+						Timestamp: 0,
+						Value:     1021,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+					{
+						Timestamp: 7199000,
+						Value:     1022,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "200"),
+					},
+					{
+						Timestamp: 0,
+						Value:     1023,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "400"),
+					},
+					{
+						Timestamp: 7199000,
+						Value:     1024,
+						Labels:    labels.FromStrings("__name__", "http_requests_total", "code", "400"),
+					},
+				},
+			},
+		},
+		{
+			ToParse: `no_help_no_type{foo="bar"} 42 6900
+# EOF
+`,
+			IsOk:                 true,
+			Description:          "Sample with no #HELP or #TYPE keyword.",
+			MaxSamplesInAppender: 5000,
+			Expected: struct {
+				MinTime       int64
+				MaxTime       int64
+				NumBlocks     int
+				BlockDuration int64
+				Samples       []backfillSample
+			}{
+				MinTime:       6900000,
+				MaxTime:       6900000,
+				NumBlocks:     1,
+				BlockDuration: tsdb.DefaultBlockDuration,
+				Samples: []backfillSample{
+					{
+						Timestamp: 6900000,
+						Value:     42,
+						Labels:    labels.FromStrings("__name__", "no_help_no_type", "foo", "bar"),
+					},
+				},
+			},
+		},
+		{
+			ToParse: `no_newline_after_eof 42 6900
+# EOF`,
+			IsOk:                 true,
+			Description:          "Sample without newline after # EOF.",
+			MaxSamplesInAppender: 5000,
+			Expected: struct {
+				MinTime       int64
+				MaxTime       int64
+				NumBlocks     int
+				BlockDuration int64
+				Samples       []backfillSample
+			}{
+				MinTime:       6900000,
+				MaxTime:       6900000,
+				NumBlocks:     1,
+				BlockDuration: tsdb.DefaultBlockDuration,
+				Samples: []backfillSample{
+					{
+						Timestamp: 6900000,
+						Value:     42,
+						Labels:    labels.FromStrings("__name__", "no_newline_after_eof"),
+					},
+				},
+			},
+		},
+		{
+			ToParse: `bare_metric 42.24 1001
+# EOF
+`,
+			IsOk:                 true,
+			Description:          "Bare sample.",
+			MaxSamplesInAppender: 5000,
+			Expected: struct {
+				MinTime       int64
+				MaxTime       int64
+				NumBlocks     int
+				BlockDuration int64
+				Samples       []backfillSample
+			}{
+				MinTime:       1001000,
+				MaxTime:       1001000,
+				NumBlocks:     1,
+				BlockDuration: tsdb.DefaultBlockDuration,
+				Samples: []backfillSample{
+					{
+						Timestamp: 1001000,
+						Value:     42.24,
+						Labels:    labels.FromStrings("__name__", "bare_metric"),
+					},
+				},
+			},
+		},
+		{
+			ToParse: `# HELP rpc_duration_seconds A summary of the RPC duration in seconds.
+# TYPE rpc_duration_seconds summary
+rpc_duration_seconds{quantile="0.01"} 3102
+rpc_duration_seconds{quantile="0.05"} 3272
+# EOF
+`,
+			IsOk:        false,
+			Description: "Does not have timestamp.",
+		},
+		{
+			ToParse: `# HELP bad_metric This a bad metric
+# TYPE bad_metric bad_type
+bad_metric{type="has a bad type information"} 0.0 111
+# EOF
+`,
+			IsOk:        false,
+			Description: "Has a bad type information.",
+		},
+		{
+			ToParse: `# HELP no_nl This test has no newline so will fail
+# TYPE no_nl gauge
+no_nl{type="no newline"}
+# EOF
+`,
+			IsOk:        false,
+			Description: "No newline.",
+		},
+		{
+			ToParse: `# HELP no_eof This test has no EOF so will fail
+# TYPE no_eof gauge
+no_eof 1 1
+`,
+			IsOk:        false,
+			Description: "No EOF.",
+		},
+		{
+			ToParse: `# HELP after_eof There is data after EOF.
+# TYPE after_eof gauge
+after_eof 1 1
+# EOF
+after_eof 1 2
+`,
+			IsOk:        false,
+			Description: "Data after EOF.",
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.Description, func(t *testing.T) {
+			t.Logf("Test:%s", test.Description)
+
+			outputDir := t.TempDir()
+
+			err := backfill(test.MaxSamplesInAppender, []byte(test.ToParse), outputDir, false, false, test.MaxBlockDuration)
+
+			if !test.IsOk {
+				require.Error(t, err, test.Description)
+				return
+			}
+
+			require.NoError(t, err)
+			options := tsdb.DefaultOptions()
+			options.RetentionDuration = int64(10 * 365 * 24 * time.Hour / time.Millisecond) // maximum duration tests require a long retention
+			db, err := tsdb.Open(outputDir, nil, nil, options, nil)
+			require.NoError(t, err)
+			defer func() {
+				require.NoError(t, db.Close())
+			}()
+
+			testBlocks(t, db, test.Expected.MinTime, test.Expected.MaxTime, test.Expected.BlockDuration, test.Expected.Samples, test.Expected.NumBlocks)
+		})
+	}
+}
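The expected BlockDuration values in the tests above (1458h and 39366h) fall out of getCompatibleBlockDuration: tsdb.ExponentialBlockRanges(tsdb.DefaultBlockDuration, 10, 3) yields ten ranges growing by a factor of three from the 2h default, and the function picks the largest range not exceeding the requested maximum. A small self-contained sketch of that arithmetic (exponentialRanges is an illustrative stand-in, not the tsdb function itself):

package main

import "fmt"

// exponentialRanges mirrors the shape of tsdb.ExponentialBlockRanges for
// illustration: steps ranges, each stepSize times the previous, starting at min.
func exponentialRanges(min int64, steps, stepSize int) []int64 {
	ranges := make([]int64, 0, steps)
	cur := min
	for i := 0; i < steps; i++ {
		ranges = append(ranges, cur)
		cur *= int64(stepSize)
	}
	return ranges
}

func main() {
	const hour = int64(3600 * 1000) // block durations are in milliseconds
	ranges := exponentialRanges(2*hour, 10, 3)
	fmt.Println(ranges[6] / hour) // 1458  -> largest range <= a 2048h maximum
	fmt.Println(ranges[9] / hour) // 39366 -> largest range, used for a 200000h maximum
}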
@@ -15,10 +15,8 @@ package main

 import (
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
-
-	"github.com/pkg/errors"
 )

 type debugWriterConfig struct {

@@ -30,7 +28,7 @@ type debugWriterConfig struct {
 func debugWrite(cfg debugWriterConfig) error {
 	archiver, err := newTarGzFileWriter(cfg.tarballName)
 	if err != nil {
-		return errors.Wrap(err, "error creating a new archiver")
+		return fmt.Errorf("error creating a new archiver: %w", err)
 	}

 	for _, endPointGroup := range cfg.endPointGroups {

@@ -39,29 +37,28 @@ func debugWrite(cfg debugWriterConfig) error {
 			fmt.Println("collecting:", url)
 			res, err := http.Get(url)
 			if err != nil {
-				return errors.Wrap(err, "error executing HTTP request")
+				return fmt.Errorf("error executing HTTP request: %w", err)
 			}
-			body, err := ioutil.ReadAll(res.Body)
+			body, err := io.ReadAll(res.Body)
 			res.Body.Close()
 			if err != nil {
-				return errors.Wrap(err, "error reading the response body")
+				return fmt.Errorf("error reading the response body: %w", err)
 			}

 			if endPointGroup.postProcess != nil {
 				body, err = endPointGroup.postProcess(body)
 				if err != nil {
-					return errors.Wrap(err, "error post-processing HTTP response body")
+					return fmt.Errorf("error post-processing HTTP response body: %w", err)
 				}
 			}
 			if err := archiver.write(filename, body); err != nil {
-				return errors.Wrap(err, "error writing into the archive")
+				return fmt.Errorf("error writing into the archive: %w", err)
 			}
 		}
 	}

 	if err := archiver.close(); err != nil {
-		return errors.Wrap(err, "error closing archive writer")
+		return fmt.Errorf("error closing archive writer: %w", err)
 	}

 	fmt.Printf("Compiling debug information complete, all files written in %q.\n", cfg.tarballName)

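The promtool changes above replace github.com/pkg/errors wrapping with the standard library's %w verb, which keeps the wrapped error inspectable via errors.Is and errors.As. A minimal sketch of the equivalence; the sentinel error and messages here are illustrative:

package main

import (
	"errors"
	"fmt"
)

var errExample = errors.New("boom") // illustrative sentinel

func main() {
	// Before: errors.Wrap(err, "error executing HTTP request")
	// After:  fmt.Errorf("error executing HTTP request: %w", err)
	wrapped := fmt.Errorf("error executing HTTP request: %w", errExample)

	// %w preserves the error chain, so errors.Is still sees the original error.
	fmt.Println(errors.Is(wrapped, errExample)) // true
}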
cmd/promtool/main.go (1203 lines): diff suppressed because it is too large.
@@ -14,54 +14,80 @@
 package main

 import (
+	"bytes"
+	"context"
+	"errors"
 	"fmt"
 	"net/http"
 	"net/http/httptest"
+	"net/url"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"syscall"
 	"testing"
 	"time"

+	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/rulefmt"
 )

+var promtoolPath = os.Args[0]
+
+func TestMain(m *testing.M) {
+	for i, arg := range os.Args {
+		if arg == "-test.main" {
+			os.Args = append(os.Args[:i], os.Args[i+1:]...)
+			main()
+			return
+		}
+	}
+
+	exitCode := m.Run()
+	os.Exit(exitCode)
+}
+
 func TestQueryRange(t *testing.T) {
 	s, getRequest := mockServer(200, `{"status": "success", "data": {"resultType": "matrix", "result": []}}`)
 	defer s.Close()

-	p := &promqlPrinter{}
-	exitCode := QueryRange(s.URL, map[string]string{}, "up", "0", "300", 0, p)
-	expectedPath := "/api/v1/query_range"
-	gotPath := getRequest().URL.Path
-	if gotPath != expectedPath {
-		t.Errorf("unexpected URL path %s (wanted %s)", gotPath, expectedPath)
-	}
-	form := getRequest().Form
-	actual := form.Get("query")
-	if actual != "up" {
-		t.Errorf("unexpected value %s for query", actual)
-	}
-	actual = form.Get("step")
-	if actual != "1" {
-		t.Errorf("unexpected value %s for step", actual)
-	}
-	if exitCode > 0 {
-		t.Error()
-	}
+	urlObject, err := url.Parse(s.URL)
+	require.Equal(t, nil, err)

-	exitCode = QueryRange(s.URL, map[string]string{}, "up", "0", "300", 10*time.Millisecond, p)
-	gotPath = getRequest().URL.Path
-	if gotPath != expectedPath {
-		t.Errorf("unexpected URL path %s (wanted %s)", gotPath, expectedPath)
-	}
+	p := &promqlPrinter{}
+	exitCode := QueryRange(urlObject, http.DefaultTransport, map[string]string{}, "up", "0", "300", 0, p)
+	require.Equal(t, "/api/v1/query_range", getRequest().URL.Path)
+	form := getRequest().Form
+	require.Equal(t, "up", form.Get("query"))
+	require.Equal(t, "1", form.Get("step"))
+	require.Equal(t, 0, exitCode)
+
+	exitCode = QueryRange(urlObject, http.DefaultTransport, map[string]string{}, "up", "0", "300", 10*time.Millisecond, p)
+	require.Equal(t, "/api/v1/query_range", getRequest().URL.Path)
 	form = getRequest().Form
-	actual = form.Get("query")
-	if actual != "up" {
-		t.Errorf("unexpected value %s for query", actual)
-	}
-	actual = form.Get("step")
-	if actual != "0.01" {
-		t.Errorf("unexpected value %s for step", actual)
-	}
-	if exitCode > 0 {
-		t.Error()
-	}
+	require.Equal(t, "up", form.Get("query"))
+	require.Equal(t, "0.01", form.Get("step"))
+	require.Equal(t, 0, exitCode)
+}
+
+func TestQueryInstant(t *testing.T) {
+	s, getRequest := mockServer(200, `{"status": "success", "data": {"resultType": "vector", "result": []}}`)
+	defer s.Close()
+
+	urlObject, err := url.Parse(s.URL)
+	require.Equal(t, nil, err)
+
+	p := &promqlPrinter{}
+	exitCode := QueryInstant(urlObject, http.DefaultTransport, "up", "300", p)
+	require.Equal(t, "/api/v1/query", getRequest().URL.Path)
+	form := getRequest().Form
+	require.Equal(t, "up", form.Get("query"))
+	require.Equal(t, "300", form.Get("time"))
+	require.Equal(t, 0, exitCode)
 }

 func mockServer(code int, body string) (*httptest.Server, func() *http.Request) {
||||||
|
@ -78,3 +104,448 @@ func mockServer(code int, body string) (*httptest.Server, func() *http.Request)
|
||||||
}
|
}
|
||||||
return server, f
|
return server, f
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestCheckSDFile(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
name string
|
||||||
|
file string
|
||||||
|
err string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "good .yml",
|
||||||
|
file: "./testdata/good-sd-file.yml",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "good .yaml",
|
||||||
|
file: "./testdata/good-sd-file.yaml",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "good .json",
|
||||||
|
file: "./testdata/good-sd-file.json",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "bad file extension",
|
||||||
|
file: "./testdata/bad-sd-file-extension.nonexistant",
|
||||||
|
err: "invalid file extension: \".nonexistant\"",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "bad format",
|
||||||
|
file: "./testdata/bad-sd-file-format.yml",
|
||||||
|
err: "yaml: unmarshal errors:\n line 1: field targats not found in type struct { Targets []string \"yaml:\\\"targets\\\"\"; Labels model.LabelSet \"yaml:\\\"labels\\\"\" }",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, test := range cases {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
_, err := checkSDFile(test.file)
|
||||||
|
if test.err != "" {
|
||||||
|
require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
require.NoError(t, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCheckDuplicates(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
name string
|
||||||
|
ruleFile string
|
||||||
|
expectedDups []compareRuleType
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "no duplicates",
|
||||||
|
ruleFile: "./testdata/rules.yml",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "duplicate in other group",
|
||||||
|
ruleFile: "./testdata/rules_duplicates.yml",
|
||||||
|
expectedDups: []compareRuleType{
|
||||||
|
{
|
||||||
|
metric: "job:test:count_over_time1m",
|
||||||
|
label: labels.New(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range cases {
|
||||||
|
c := test
|
||||||
|
t.Run(c.name, func(t *testing.T) {
|
||||||
|
rgs, err := rulefmt.ParseFile(c.ruleFile)
|
||||||
|
require.Empty(t, err)
|
||||||
|
dups := checkDuplicates(rgs.Groups)
|
||||||
|
require.Equal(t, c.expectedDups, dups)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkCheckDuplicates(b *testing.B) {
|
||||||
|
rgs, err := rulefmt.ParseFile("./testdata/rules_large.yml")
|
||||||
|
require.Empty(b, err)
|
||||||
|
b.ResetTimer()
|
||||||
|
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
checkDuplicates(rgs.Groups)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCheckTargetConfig(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
name string
|
||||||
|
file string
|
||||||
|
err string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "url_in_scrape_targetgroup_with_relabel_config.good",
|
||||||
|
file: "url_in_scrape_targetgroup_with_relabel_config.good.yml",
|
||||||
|
err: "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "url_in_alert_targetgroup_with_relabel_config.good",
|
||||||
|
file: "url_in_alert_targetgroup_with_relabel_config.good.yml",
|
||||||
|
err: "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "url_in_scrape_targetgroup_with_relabel_config.bad",
|
||||||
|
file: "url_in_scrape_targetgroup_with_relabel_config.bad.yml",
|
||||||
|
err: "instance 0 in group 0: \"http://bad\" is not a valid hostname",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "url_in_alert_targetgroup_with_relabel_config.bad",
|
||||||
|
file: "url_in_alert_targetgroup_with_relabel_config.bad.yml",
|
||||||
|
err: "\"http://bad\" is not a valid hostname",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, test := range cases {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
_, err := checkConfig(false, "testdata/"+test.file, false)
|
||||||
|
if test.err != "" {
|
||||||
|
require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
require.NoError(t, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCheckConfigSyntax(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
name string
|
||||||
|
file string
|
||||||
|
syntaxOnly bool
|
||||||
|
err string
|
||||||
|
errWindows string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "check with syntax only succeeds with nonexistent rule files",
|
||||||
|
file: "config_with_rule_files.yml",
|
||||||
|
syntaxOnly: true,
|
||||||
|
err: "",
|
||||||
|
errWindows: "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "check without syntax only fails with nonexistent rule files",
|
||||||
|
file: "config_with_rule_files.yml",
|
||||||
|
syntaxOnly: false,
|
||||||
|
err: "\"testdata/non-existent-file.yml\" does not point to an existing file",
|
||||||
|
errWindows: "\"testdata\\\\non-existent-file.yml\" does not point to an existing file",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "check with syntax only succeeds with nonexistent service discovery files",
|
||||||
|
file: "config_with_service_discovery_files.yml",
|
||||||
|
syntaxOnly: true,
|
||||||
|
err: "",
|
||||||
|
errWindows: "",
|
||||||
|
},
|
||||||
|
// The test below doesn't fail because the file verification for ServiceDiscoveryConfigs doesn't fail the check if
|
||||||
|
// file isn't found; it only outputs a warning message.
|
||||||
|
{
|
||||||
|
name: "check without syntax only succeeds with nonexistent service discovery files",
|
||||||
|
file: "config_with_service_discovery_files.yml",
|
||||||
|
syntaxOnly: false,
|
||||||
|
err: "",
|
||||||
|
errWindows: "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "check with syntax only succeeds with nonexistent TLS files",
|
||||||
|
file: "config_with_tls_files.yml",
|
||||||
|
syntaxOnly: true,
|
||||||
|
err: "",
|
||||||
|
errWindows: "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "check without syntax only fails with nonexistent TLS files",
|
||||||
|
file: "config_with_tls_files.yml",
|
||||||
|
syntaxOnly: false,
|
||||||
|
err: "error checking client cert file \"testdata/nonexistent_cert_file.yml\": " +
|
||||||
|
"stat testdata/nonexistent_cert_file.yml: no such file or directory",
|
||||||
|
errWindows: "error checking client cert file \"testdata\\\\nonexistent_cert_file.yml\": " +
|
||||||
|
"CreateFile testdata\\nonexistent_cert_file.yml: The system cannot find the file specified.",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "check with syntax only succeeds with nonexistent credentials file",
|
||||||
|
file: "authorization_credentials_file.bad.yml",
|
||||||
|
syntaxOnly: true,
|
||||||
|
err: "",
|
||||||
|
errWindows: "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "check without syntax only fails with nonexistent credentials file",
|
||||||
|
file: "authorization_credentials_file.bad.yml",
|
||||||
|
syntaxOnly: false,
|
||||||
|
err: "error checking authorization credentials or bearer token file \"/random/file/which/does/not/exist.yml\": " +
|
||||||
|
"stat /random/file/which/does/not/exist.yml: no such file or directory",
|
||||||
|
errWindows: "error checking authorization credentials or bearer token file \"testdata\\\\random\\\\file\\\\which\\\\does\\\\not\\\\exist.yml\": " +
|
||||||
|
"CreateFile testdata\\random\\file\\which\\does\\not\\exist.yml: The system cannot find the path specified.",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, test := range cases {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
_, err := checkConfig(false, "testdata/"+test.file, test.syntaxOnly)
|
||||||
|
expectedErrMsg := test.err
|
||||||
|
if strings.Contains(runtime.GOOS, "windows") {
|
||||||
|
expectedErrMsg = test.errWindows
|
||||||
|
}
|
||||||
|
if expectedErrMsg != "" {
|
||||||
|
require.Equalf(t, expectedErrMsg, err.Error(), "Expected error %q, got %q", test.err, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
require.NoError(t, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAuthorizationConfig(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
name string
|
||||||
|
file string
|
||||||
|
err string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "authorization_credentials_file.bad",
|
||||||
|
file: "authorization_credentials_file.bad.yml",
|
||||||
|
err: "error checking authorization credentials or bearer token file",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "authorization_credentials_file.good",
|
||||||
|
file: "authorization_credentials_file.good.yml",
|
||||||
|
err: "",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range cases {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
_, err := checkConfig(false, "testdata/"+test.file, false)
|
||||||
|
if test.err != "" {
|
||||||
|
require.Contains(t, err.Error(), test.err, "Expected error to contain %q, got %q", test.err, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
require.NoError(t, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCheckMetricsExtended(t *testing.T) {
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
t.Skip("Skipping on windows")
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.Open("testdata/metrics-test.prom")
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
stats, total, err := checkMetricsExtended(f)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 27, total)
|
||||||
|
require.Equal(t, []metricStat{
|
||||||
|
{
|
||||||
|
name: "prometheus_tsdb_compaction_chunk_size_bytes",
|
||||||
|
cardinality: 15,
|
||||||
|
percentage: float64(15) / float64(27),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "go_gc_duration_seconds",
|
||||||
|
cardinality: 7,
|
||||||
|
percentage: float64(7) / float64(27),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "net_conntrack_dialer_conn_attempted_total",
|
||||||
|
cardinality: 4,
|
||||||
|
percentage: float64(4) / float64(27),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "go_info",
|
||||||
|
cardinality: 1,
|
||||||
|
percentage: float64(1) / float64(27),
|
||||||
|
},
|
||||||
|
}, stats)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExitCodes(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("skipping test in short mode.")
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, c := range []struct {
|
||||||
|
file string
|
||||||
|
exitCode int
|
||||||
|
lintIssue bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
file: "prometheus-config.good.yml",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
file: "prometheus-config.bad.yml",
|
||||||
|
exitCode: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
file: "prometheus-config.nonexistent.yml",
|
||||||
|
exitCode: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
file: "prometheus-config.lint.yml",
|
||||||
|
lintIssue: true,
|
||||||
|
exitCode: 3,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(c.file, func(t *testing.T) {
|
||||||
|
for _, lintFatal := range []bool{true, false} {
|
||||||
|
t.Run(fmt.Sprintf("%t", lintFatal), func(t *testing.T) {
|
||||||
|
args := []string{"-test.main", "check", "config", "testdata/" + c.file}
|
||||||
|
if lintFatal {
|
||||||
|
args = append(args, "--lint-fatal")
|
||||||
|
}
|
||||||
|
tool := exec.Command(promtoolPath, args...)
|
||||||
|
err := tool.Run()
|
||||||
|
if c.exitCode == 0 || (c.lintIssue && !lintFatal) {
|
||||||
|
require.NoError(t, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
require.Error(t, err)
|
||||||
|
|
||||||
|
var exitError *exec.ExitError
|
||||||
|
if errors.As(err, &exitError) {
|
||||||
|
status := exitError.Sys().(syscall.WaitStatus)
|
||||||
|
require.Equal(t, c.exitCode, status.ExitStatus())
|
||||||
|
} else {
|
||||||
|
t.Errorf("unable to retrieve the exit status for promtool: %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDocumentation(t *testing.T) {
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
t.SkipNow()
|
||||||
|
}
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
cmd := exec.CommandContext(ctx, promtoolPath, "-test.main", "write-documentation")
|
||||||
|
|
||||||
|
var stdout bytes.Buffer
|
||||||
|
cmd.Stdout = &stdout
|
||||||
|
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
if exitError, ok := err.(*exec.ExitError); ok {
|
||||||
|
if exitError.ExitCode() != 0 {
|
||||||
|
fmt.Println("Command failed with non-zero exit code")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
generatedContent := strings.ReplaceAll(stdout.String(), filepath.Base(promtoolPath), strings.TrimSuffix(filepath.Base(promtoolPath), ".test"))
|
||||||
|
|
||||||
|
expectedContent, err := os.ReadFile(filepath.Join("..", "..", "docs", "command-line", "promtool.md"))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, string(expectedContent), generatedContent, "Generated content does not match documentation. Hint: run `make cli-documentation`.")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCheckRules(t *testing.T) {
	t.Run("rules-good", func(t *testing.T) {
		data, err := os.ReadFile("./testdata/rules.yml")
		require.NoError(t, err)
		r, w, err := os.Pipe()
		if err != nil {
			t.Fatal(err)
		}

		_, err = w.Write(data)
		if err != nil {
			t.Error(err)
		}
		w.Close()

		// Restore stdin right after the test.
		defer func(v *os.File) { os.Stdin = v }(os.Stdin)
		os.Stdin = r

		exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false))
		require.Equal(t, successExitCode, exitCode, "")
	})

	t.Run("rules-bad", func(t *testing.T) {
		data, err := os.ReadFile("./testdata/rules-bad.yml")
		require.NoError(t, err)
		r, w, err := os.Pipe()
		if err != nil {
			t.Fatal(err)
		}

		_, err = w.Write(data)
		if err != nil {
			t.Error(err)
		}
		w.Close()

		// Restore stdin right after the test.
		defer func(v *os.File) { os.Stdin = v }(os.Stdin)
		os.Stdin = r

		exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false))
		require.Equal(t, failureExitCode, exitCode, "")
	})

	t.Run("rules-lint-fatal", func(t *testing.T) {
		data, err := os.ReadFile("./testdata/prometheus-rules.lint.yml")
		require.NoError(t, err)
		r, w, err := os.Pipe()
		if err != nil {
			t.Fatal(err)
		}

		_, err = w.Write(data)
		if err != nil {
			t.Error(err)
		}
		w.Close()

		// Restore stdin right after the test.
		defer func(v *os.File) { os.Stdin = v }(os.Stdin)
		os.Stdin = r

		exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, true))
		require.Equal(t, lintErrExitCode, exitCode, "")
	})
}
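
The stdin swap used in these subtests is a general, reusable pattern. A minimal self-contained sketch, standard library only, with illustrative names:

package main

import (
	"fmt"
	"io"
	"os"
)

// readFromFakeStdin shows the pattern used above: replace os.Stdin with the
// read end of a pipe, write test data to the write end, restore afterwards.
func readFromFakeStdin(data []byte) (string, error) {
	r, w, err := os.Pipe()
	if err != nil {
		return "", err
	}
	if _, err := w.Write(data); err != nil {
		return "", err
	}
	w.Close() // closing the write end lets readers see EOF

	defer func(v *os.File) { os.Stdin = v }(os.Stdin) // restore the real stdin
	os.Stdin = r

	got, err := io.ReadAll(os.Stdin)
	return string(got), err
}

func main() {
	s, err := readFromFakeStdin([]byte("hello"))
	fmt.Println(s, err) // prints: hello <nil>
}
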
func TestCheckRulesWithRuleFiles(t *testing.T) {
	t.Run("rules-good", func(t *testing.T) {
		exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules.yml")
		require.Equal(t, successExitCode, exitCode, "")
	})

	t.Run("rules-bad", func(t *testing.T) {
		exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules-bad.yml")
		require.Equal(t, failureExitCode, exitCode, "")
	})

	t.Run("rules-lint-fatal", func(t *testing.T) {
		exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, true), "./testdata/prometheus-rules.lint.yml")
		require.Equal(t, lintErrExitCode, exitCode, "")
	})
}

138 cmd/promtool/metrics.go Normal file
@ -0,0 +1,138 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"time"

	"github.com/golang/snappy"
	config_util "github.com/prometheus/common/config"
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/storage/remote"
	"github.com/prometheus/prometheus/util/fmtutil"
)

// PushMetrics pushes metrics to a Prometheus remote write endpoint (for testing purposes only).
func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, labels map[string]string, files ...string) int {
	addressURL, err := url.Parse(url.String())
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return failureExitCode
	}

	// Build the remote write client.
	writeClient, err := remote.NewWriteClient("remote-write", &remote.ClientConfig{
		URL:     &config_util.URL{URL: addressURL},
		Timeout: model.Duration(timeout),
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return failureExitCode
	}

	// Set the custom TLS config from httpConfigFilePath and
	// set custom headers on every request.
	client, ok := writeClient.(*remote.Client)
	if !ok {
		fmt.Fprintln(os.Stderr, fmt.Errorf("unexpected type %T", writeClient))
		return failureExitCode
	}
	client.Client.Transport = &setHeadersTransport{
		RoundTripper: roundTripper,
		headers:      headers,
	}

	var data []byte
	var failed bool

	if len(files) == 0 {
		data, err = io.ReadAll(os.Stdin)
		if err != nil {
			fmt.Fprintln(os.Stderr, " FAILED:", err)
			return failureExitCode
		}
		fmt.Printf("Parsing standard input\n")
		if parseAndPushMetrics(client, data, labels) {
			fmt.Printf(" SUCCESS: metrics pushed to remote write.\n")
			return successExitCode
		}
		return failureExitCode
	}

	for _, file := range files {
		data, err = os.ReadFile(file)
		if err != nil {
			fmt.Fprintln(os.Stderr, " FAILED:", err)
			failed = true
			continue
		}

		fmt.Printf("Parsing metrics file %s\n", file)
		if parseAndPushMetrics(client, data, labels) {
			fmt.Printf(" SUCCESS: metrics file %s pushed to remote write.\n", file)
			continue
		}
		failed = true
	}

	if failed {
		return failureExitCode
	}

	return successExitCode
}

func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]string) bool {
	metricsData, err := fmtutil.MetricTextToWriteRequest(bytes.NewReader(data), labels)
	if err != nil {
		fmt.Fprintln(os.Stderr, " FAILED:", err)
		return false
	}

	raw, err := metricsData.Marshal()
	if err != nil {
		fmt.Fprintln(os.Stderr, " FAILED:", err)
		return false
	}

	// Encode the request body into snappy encoding.
	compressed := snappy.Encode(nil, raw)
	err = client.Store(context.Background(), compressed, 0)
	if err != nil {
		fmt.Fprintln(os.Stderr, " FAILED:", err)
		return false
	}

	return true
}

type setHeadersTransport struct {
	http.RoundTripper
	headers map[string]string
}

func (s *setHeadersTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	for key, value := range s.headers {
		req.Header.Set(key, value)
	}
	return s.RoundTripper.RoundTrip(req)
}
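
For orientation, a minimal sketch of the encoding pipeline parseAndPushMetrics performs (text exposition format, then protobuf WriteRequest, then snappy), assuming the same fmtutil and snappy packages imported above; the metric text and job label are illustrative:

package main

import (
	"fmt"
	"strings"

	"github.com/golang/snappy"

	"github.com/prometheus/prometheus/util/fmtutil"
)

func main() {
	// Illustrative metric in the Prometheus text exposition format.
	text := "# TYPE demo_requests_total counter\ndemo_requests_total 42\n"

	// Convert to a remote-write protobuf request, attaching an extra label.
	wr, err := fmtutil.MetricTextToWriteRequest(strings.NewReader(text), map[string]string{"job": "demo"})
	if err != nil {
		panic(err)
	}

	raw, err := wr.Marshal()
	if err != nil {
		panic(err)
	}

	// Remote write bodies are snappy-compressed protobuf.
	compressed := snappy.Encode(nil, raw)
	fmt.Printf("raw=%d bytes, compressed=%d bytes\n", len(raw), len(compressed))
}
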
250 cmd/promtool/rules.go Normal file
@ -0,0 +1,250 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/timestamp"
	"github.com/prometheus/prometheus/rules"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb"
	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
)

const maxSamplesInMemory = 5000

type queryRangeAPI interface {
	QueryRange(ctx context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error)
}

type ruleImporter struct {
	logger log.Logger
	config ruleImporterConfig

	apiClient queryRangeAPI

	groups      map[string]*rules.Group
	ruleManager *rules.Manager
}

type ruleImporterConfig struct {
	outputDir        string
	start            time.Time
	end              time.Time
	evalInterval     time.Duration
	maxBlockDuration time.Duration
}

// newRuleImporter creates a new rule importer that can be used to parse and evaluate
// recording rule files and create new series, written to disk in blocks.
func newRuleImporter(logger log.Logger, config ruleImporterConfig, apiClient queryRangeAPI) *ruleImporter {
	level.Info(logger).Log("backfiller", "new rule importer", "start", config.start.Format(time.RFC822), "end", config.end.Format(time.RFC822))
	return &ruleImporter{
		logger:      logger,
		config:      config,
		apiClient:   apiClient,
		ruleManager: rules.NewManager(&rules.ManagerOptions{}),
	}
}

// loadGroups parses groups from a list of recording rule files.
func (importer *ruleImporter) loadGroups(_ context.Context, filenames []string) (errs []error) {
	groups, errs := importer.ruleManager.LoadGroups(importer.config.evalInterval, labels.Labels{}, "", nil, filenames...)
	if errs != nil {
		return errs
	}
	importer.groups = groups
	return nil
}

// importAll evaluates all the recording rules and creates new time series and writes them to disk in blocks.
func (importer *ruleImporter) importAll(ctx context.Context) (errs []error) {
	for name, group := range importer.groups {
		level.Info(importer.logger).Log("backfiller", "processing group", "name", name)

		for i, r := range group.Rules() {
			level.Info(importer.logger).Log("backfiller", "processing rule", "id", i, "name", r.Name())
			if err := importer.importRule(ctx, r.Query().String(), r.Name(), r.Labels(), importer.config.start, importer.config.end, int64(importer.config.maxBlockDuration/time.Millisecond), group); err != nil {
				errs = append(errs, err)
			}
		}
	}
	return errs
}

// importRule queries a Prometheus API to evaluate rules at times in the past.
func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName string, ruleLabels labels.Labels, start, end time.Time,
	maxBlockDuration int64, grp *rules.Group,
) (err error) {
	blockDuration := getCompatibleBlockDuration(maxBlockDuration)
	startInMs := start.Unix() * int64(time.Second/time.Millisecond)
	endInMs := end.Unix() * int64(time.Second/time.Millisecond)

	for startOfBlock := blockDuration * (startInMs / blockDuration); startOfBlock <= endInMs; startOfBlock += blockDuration {
		endOfBlock := startOfBlock + blockDuration - 1

		currStart := max(startOfBlock/int64(time.Second/time.Millisecond), start.Unix())
		startWithAlignment := grp.EvalTimestamp(time.Unix(currStart, 0).UTC().UnixNano())
		for startWithAlignment.Unix() < currStart {
			startWithAlignment = startWithAlignment.Add(grp.Interval())
		}
		end := time.Unix(min(endOfBlock/int64(time.Second/time.Millisecond), end.Unix()), 0).UTC()
		if end.Before(startWithAlignment) {
			break
		}
		val, warnings, err := importer.apiClient.QueryRange(ctx,
			ruleExpr,
			v1.Range{
				Start: startWithAlignment,
				End:   end,
				Step:  grp.Interval(),
			},
		)
		if err != nil {
			return fmt.Errorf("query range: %w", err)
		}
		if warnings != nil {
			level.Warn(importer.logger).Log("msg", "Range query returned warnings.", "warnings", warnings)
		}

		// To prevent races with compaction, a block writer only allows appending samples
		// that are at most half a block size older than the most recent sample appended so far.
		// However, in the way we use the block writer here, compaction doesn't happen, while we
		// also need to append samples throughout the whole block range. To allow that, we
		// pretend that the block is twice as large here, but only really add samples in the
		// original interval later.
		w, err := tsdb.NewBlockWriter(log.NewNopLogger(), importer.config.outputDir, 2*blockDuration)
		if err != nil {
			return fmt.Errorf("new block writer: %w", err)
		}
		var closed bool
		defer func() {
			if !closed {
				err = tsdb_errors.NewMulti(err, w.Close()).Err()
			}
		}()
		app := newMultipleAppender(ctx, w)
		var matrix model.Matrix
		switch val.Type() {
		case model.ValMatrix:
			matrix = val.(model.Matrix)

			for _, sample := range matrix {
				lb := labels.NewBuilder(labels.Labels{})

				for name, value := range sample.Metric {
					lb.Set(string(name), string(value))
				}

				// Setting the rule labels after the output of the query,
				// so they can override query output.
				ruleLabels.Range(func(l labels.Label) {
					lb.Set(l.Name, l.Value)
				})

				lb.Set(labels.MetricName, ruleName)
				lbls := lb.Labels()

				for _, value := range sample.Values {
					if err := app.add(ctx, lbls, timestamp.FromTime(value.Timestamp.Time()), float64(value.Value)); err != nil {
						return fmt.Errorf("add: %w", err)
					}
				}
			}
		default:
			return fmt.Errorf("rule result is wrong type %s", val.Type().String())
		}

		if err := app.flushAndCommit(ctx); err != nil {
			return fmt.Errorf("flush and commit: %w", err)
		}
		err = tsdb_errors.NewMulti(err, w.Close()).Err()
		closed = true
	}

	return err
}

func newMultipleAppender(ctx context.Context, blockWriter *tsdb.BlockWriter) *multipleAppender {
	return &multipleAppender{
		maxSamplesInMemory: maxSamplesInMemory,
		writer:             blockWriter,
		appender:           blockWriter.Appender(ctx),
	}
}

// multipleAppender keeps track of how many series have been added to the current appender.
// If the max samples have been added, then all series are committed and a new appender is created.
type multipleAppender struct {
	maxSamplesInMemory int
	currentSampleCount int
	writer             *tsdb.BlockWriter
	appender           storage.Appender
}

func (m *multipleAppender) add(ctx context.Context, l labels.Labels, t int64, v float64) error {
	if _, err := m.appender.Append(0, l, t, v); err != nil {
		return fmt.Errorf("multiappender append: %w", err)
	}
	m.currentSampleCount++
	if m.currentSampleCount >= m.maxSamplesInMemory {
		return m.commit(ctx)
	}
	return nil
}

func (m *multipleAppender) commit(ctx context.Context) error {
	if m.currentSampleCount == 0 {
		return nil
	}
	if err := m.appender.Commit(); err != nil {
		return fmt.Errorf("multiappender commit: %w", err)
	}
	m.appender = m.writer.Appender(ctx)
	m.currentSampleCount = 0
	return nil
}

func (m *multipleAppender) flushAndCommit(ctx context.Context) error {
	if err := m.commit(ctx); err != nil {
		return err
	}
	if _, err := m.writer.Flush(ctx); err != nil {
		return fmt.Errorf("multiappender flush: %w", err)
	}
	return nil
}

func max(x, y int64) int64 {
	if x > y {
		return x
	}
	return y
}

func min(x, y int64) int64 {
	if x < y {
		return x
	}
	return y
}
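
The block-aligned loop in importRule is easiest to see in isolation. A minimal standalone sketch, assuming the default two-hour TSDB block duration and illustrative start/end times:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Default TSDB block size, expressed in milliseconds (assumption: 2h).
	blockDuration := int64(2 * time.Hour / time.Millisecond)
	start := time.Date(2023, time.May, 1, 3, 17, 0, 0, time.UTC)
	end := time.Date(2023, time.May, 1, 9, 0, 0, 0, time.UTC)

	startInMs := start.Unix() * 1000
	endInMs := end.Unix() * 1000

	// Walk block-aligned windows covering [start, end], mirroring the loop
	// above: the first window starts at the largest block boundary <= start.
	for startOfBlock := blockDuration * (startInMs / blockDuration); startOfBlock <= endInMs; startOfBlock += blockDuration {
		endOfBlock := startOfBlock + blockDuration - 1
		fmt.Printf("block window: %s .. %s\n",
			time.UnixMilli(startOfBlock).UTC().Format(time.RFC3339),
			time.UnixMilli(endOfBlock).UTC().Format(time.RFC3339))
	}
}
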
265 cmd/promtool/rules_test.go Normal file
@ -0,0 +1,265 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"context"
	"math"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/go-kit/log"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

type mockQueryRangeAPI struct {
	samples model.Matrix
}

func (mockAPI mockQueryRangeAPI) QueryRange(_ context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) { // nolint:revive
	return mockAPI.samples, v1.Warnings{}, nil
}

const defaultBlockDuration = time.Duration(tsdb.DefaultBlockDuration) * time.Millisecond

// TestBackfillRuleIntegration is an integration test that runs all the rule importer code to confirm the parts work together.
func TestBackfillRuleIntegration(t *testing.T) {
	const (
		testMaxSampleCount = 50
		testValue          = 123
		testValue2         = 98
	)
	var (
		start                     = time.Date(2009, time.November, 10, 6, 34, 0, 0, time.UTC)
		testTime                  = model.Time(start.Add(-9 * time.Hour).Unix())
		testTime2                 = model.Time(start.Add(-8 * time.Hour).Unix())
		twentyFourHourDuration, _ = time.ParseDuration("24h")
	)

	testCases := []struct {
		name                string
		runcount            int
		maxBlockDuration    time.Duration
		expectedBlockCount  int
		expectedSeriesCount int
		expectedSampleCount int
		samples             []*model.SampleStream
	}{
		{"no samples", 1, defaultBlockDuration, 0, 0, 0, []*model.SampleStream{}},
		{"run importer once", 1, defaultBlockDuration, 8, 4, 4, []*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}},
		{"run importer with dup name label", 1, defaultBlockDuration, 8, 4, 4, []*model.SampleStream{{Metric: model.Metric{"__name__": "val1", "name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}},
		{"one importer twice", 2, defaultBlockDuration, 8, 4, 8, []*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}, {Timestamp: testTime2, Value: testValue2}}}}},
		{"run importer once with larger blocks", 1, twentyFourHourDuration, 4, 4, 4, []*model.SampleStream{{Metric: model.Metric{"name1": "val1"}, Values: []model.SamplePair{{Timestamp: testTime, Value: testValue}}}}},
	}
	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {
			tmpDir := t.TempDir()
			ctx := context.Background()

			// Execute the test more than once to simulate running the rule importer twice with the same data.
			// We expect duplicate blocks with the same series are created when run more than once.
			for i := 0; i < tt.runcount; i++ {
				ruleImporter, err := newTestRuleImporter(ctx, start, tmpDir, tt.samples, tt.maxBlockDuration)
				require.NoError(t, err)
				path1 := filepath.Join(tmpDir, "test.file")
				require.NoError(t, createSingleRuleTestFiles(path1))
				path2 := filepath.Join(tmpDir, "test2.file")
				require.NoError(t, createMultiRuleTestFiles(path2))

				// Confirm that the rule files were loaded in correctly.
				errs := ruleImporter.loadGroups(ctx, []string{path1, path2})
				for _, err := range errs {
					require.NoError(t, err)
				}
				require.Equal(t, 3, len(ruleImporter.groups))
				group1 := ruleImporter.groups[path1+";group0"]
				require.NotNil(t, group1)
				const defaultInterval = 60
				require.Equal(t, defaultInterval*time.Second, group1.Interval())
				gRules := group1.Rules()
				require.Equal(t, 1, len(gRules))
				require.Equal(t, "rule1", gRules[0].Name())
				require.Equal(t, "ruleExpr", gRules[0].Query().String())
				require.Equal(t, 1, gRules[0].Labels().Len())

				group2 := ruleImporter.groups[path2+";group2"]
				require.NotNil(t, group2)
				require.Equal(t, defaultInterval*time.Second, group2.Interval())
				g2Rules := group2.Rules()
				require.Equal(t, 2, len(g2Rules))
				require.Equal(t, "grp2_rule1", g2Rules[0].Name())
				require.Equal(t, "grp2_rule1_expr", g2Rules[0].Query().String())
				require.Equal(t, 0, g2Rules[0].Labels().Len())

				// Backfill all recording rules then check the blocks to confirm the correct data was created.
				errs = ruleImporter.importAll(ctx)
				for _, err := range errs {
					require.NoError(t, err)
				}

				opts := tsdb.DefaultOptions()
				db, err := tsdb.Open(tmpDir, nil, nil, opts, nil)
				require.NoError(t, err)

				blocks := db.Blocks()
				require.Equal(t, (i+1)*tt.expectedBlockCount, len(blocks))

				q, err := db.Querier(math.MinInt64, math.MaxInt64)
				require.NoError(t, err)

				selectedSeries := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
				var seriesCount, samplesCount int
				for selectedSeries.Next() {
					seriesCount++
					series := selectedSeries.At()
					if series.Labels().Len() != 3 {
						require.Equal(t, 2, series.Labels().Len())
						x := labels.FromStrings("__name__", "grp2_rule1", "name1", "val1")
						require.Equal(t, x, series.Labels())
					} else {
						require.Equal(t, 3, series.Labels().Len())
					}
					it := series.Iterator(nil)
					for it.Next() == chunkenc.ValFloat {
						samplesCount++
						ts, v := it.At()
						if v == testValue {
							require.Equal(t, int64(testTime), ts)
						} else {
							require.Equal(t, int64(testTime2), ts)
						}
					}
					require.NoError(t, it.Err())
				}
				require.NoError(t, selectedSeries.Err())
				require.Equal(t, tt.expectedSeriesCount, seriesCount)
				require.Equal(t, tt.expectedSampleCount, samplesCount)
				require.NoError(t, q.Close())
				require.NoError(t, db.Close())
			}
		})
	}
}

func newTestRuleImporter(_ context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) {
	logger := log.NewNopLogger()
	cfg := ruleImporterConfig{
		outputDir:        tmpDir,
		start:            start.Add(-10 * time.Hour),
		end:              start.Add(-7 * time.Hour),
		evalInterval:     60 * time.Second,
		maxBlockDuration: maxBlockDuration,
	}

	return newRuleImporter(logger, cfg, mockQueryRangeAPI{
		samples: testSamples,
	}), nil
}

func createSingleRuleTestFiles(path string) error {
	recordingRules := `groups:
- name: group0
  rules:
  - record: rule1
    expr: ruleExpr
    labels:
      testlabel11: testlabelvalue11
`
	return os.WriteFile(path, []byte(recordingRules), 0o777)
}

func createMultiRuleTestFiles(path string) error {
	recordingRules := `groups:
- name: group1
  rules:
  - record: grp1_rule1
    expr: grp1_rule1_expr
    labels:
      testlabel11: testlabelvalue12
- name: group2
  rules:
  - record: grp2_rule1
    expr: grp2_rule1_expr
  - record: grp2_rule2
    expr: grp2_rule2_expr
    labels:
      testlabel11: testlabelvalue13
`
	return os.WriteFile(path, []byte(recordingRules), 0o777)
}

// TestBackfillLabels confirms that the labels in the rule file override the labels from the metrics
// received from the Prometheus Query API, including the __name__ label.
func TestBackfillLabels(t *testing.T) {
	tmpDir := t.TempDir()
	ctx := context.Background()

	start := time.Date(2009, time.November, 10, 6, 34, 0, 0, time.UTC)
	mockAPISamples := []*model.SampleStream{
		{
			Metric: model.Metric{"name1": "override-me", "__name__": "override-me-too"},
			Values: []model.SamplePair{{Timestamp: model.TimeFromUnixNano(start.UnixNano()), Value: 123}},
		},
	}
	ruleImporter, err := newTestRuleImporter(ctx, start, tmpDir, mockAPISamples, defaultBlockDuration)
	require.NoError(t, err)

	path := filepath.Join(tmpDir, "test.file")
	recordingRules := `groups:
- name: group0
  rules:
  - record: rulename
    expr: ruleExpr
    labels:
      name1: value-from-rule
`
	require.NoError(t, os.WriteFile(path, []byte(recordingRules), 0o777))
	errs := ruleImporter.loadGroups(ctx, []string{path})
	for _, err := range errs {
		require.NoError(t, err)
	}

	errs = ruleImporter.importAll(ctx)
	for _, err := range errs {
		require.NoError(t, err)
	}

	opts := tsdb.DefaultOptions()
	db, err := tsdb.Open(tmpDir, nil, nil, opts, nil)
	require.NoError(t, err)

	q, err := db.Querier(math.MinInt64, math.MaxInt64)
	require.NoError(t, err)

	t.Run("correct-labels", func(t *testing.T) {
		selectedSeries := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
		for selectedSeries.Next() {
			series := selectedSeries.At()
			expectedLabels := labels.FromStrings("__name__", "rulename", "name1", "value-from-rule")
			require.Equal(t, expectedLabels, series.Labels())
		}
		require.NoError(t, selectedSeries.Err())
		require.NoError(t, q.Close())
		require.NoError(t, db.Close())
	})
}

154 cmd/promtool/sd.go Normal file
@ -0,0 +1,154 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"reflect"
	"time"

	"github.com/go-kit/log"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/scrape"
)

type sdCheckResult struct {
	DiscoveredLabels labels.Labels `json:"discoveredLabels"`
	Labels           labels.Labels `json:"labels"`
	Error            error         `json:"error,omitempty"`
}

// CheckSD performs service discovery for the given job name and reports the results.
func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefaultScrapePort bool) int {
	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

	cfg, err := config.LoadFile(sdConfigFiles, false, false, logger)
	if err != nil {
		fmt.Fprintln(os.Stderr, "Cannot load config", err)
		return failureExitCode
	}

	var scrapeConfig *config.ScrapeConfig
	scfgs, err := cfg.GetScrapeConfigs()
	if err != nil {
		fmt.Fprintln(os.Stderr, "Cannot load scrape configs", err)
		return failureExitCode
	}

	jobs := []string{}
	jobMatched := false
	for _, v := range scfgs {
		jobs = append(jobs, v.JobName)
		if v.JobName == sdJobName {
			jobMatched = true
			scrapeConfig = v
			break
		}
	}

	if !jobMatched {
		fmt.Fprintf(os.Stderr, "Job %s not found. Select one of:\n", sdJobName)
		for _, job := range jobs {
			fmt.Fprintf(os.Stderr, "\t%s\n", job)
		}
		return failureExitCode
	}

	targetGroupChan := make(chan []*targetgroup.Group)
	ctx, cancel := context.WithTimeout(context.Background(), sdTimeout)
	defer cancel()

	for _, cfg := range scrapeConfig.ServiceDiscoveryConfigs {
		d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{Logger: logger})
		if err != nil {
			fmt.Fprintln(os.Stderr, "Could not create new discoverer", err)
			return failureExitCode
		}
		go d.Run(ctx, targetGroupChan)
	}

	var targetGroups []*targetgroup.Group
	sdCheckResults := make(map[string][]*targetgroup.Group)
outerLoop:
	for {
		select {
		case targetGroups = <-targetGroupChan:
			for _, tg := range targetGroups {
				sdCheckResults[tg.Source] = append(sdCheckResults[tg.Source], tg)
			}
		case <-ctx.Done():
			break outerLoop
		}
	}
	results := []sdCheckResult{}
	for _, tgs := range sdCheckResults {
		results = append(results, getSDCheckResult(tgs, scrapeConfig, noDefaultScrapePort)...)
	}

	res, err := json.MarshalIndent(results, "", " ")
	if err != nil {
		fmt.Fprintf(os.Stderr, "Could not marshal result json: %s", err)
		return failureExitCode
	}

	fmt.Printf("%s", res)
	return successExitCode
}

func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig, noDefaultScrapePort bool) []sdCheckResult {
	sdCheckResults := []sdCheckResult{}
	lb := labels.NewBuilder(labels.EmptyLabels())
	for _, targetGroup := range targetGroups {
		for _, target := range targetGroup.Targets {
			lb.Reset(labels.EmptyLabels())

			for name, value := range target {
				lb.Set(string(name), string(value))
			}

			for name, value := range targetGroup.Labels {
				if _, ok := target[name]; !ok {
					lb.Set(string(name), string(value))
				}
			}

			res, orig, err := scrape.PopulateLabels(lb, scrapeConfig, noDefaultScrapePort)
			result := sdCheckResult{
				DiscoveredLabels: orig,
				Labels:           res,
				Error:            err,
			}

			duplicateRes := false
			for _, sdCheckRes := range sdCheckResults {
				if reflect.DeepEqual(sdCheckRes, result) {
					duplicateRes = true
					break
				}
			}

			if !duplicateRes {
				sdCheckResults = append(sdCheckResults, result)
			}
		}
	}
	return sdCheckResults
}
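
The collection loop in CheckSD follows a common Go pattern: producers write to a channel until a context deadline stops the drain. A minimal standalone sketch with illustrative names and a plain string channel standing in for the target-group channel:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	results := make(chan string)
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// A producer standing in for the discoverers started with go d.Run(ctx, ch).
	go func() {
		for i := 0; ; i++ {
			select {
			case results <- fmt.Sprintf("target-group-%d", i):
			case <-ctx.Done():
				return
			}
			time.Sleep(10 * time.Millisecond)
		}
	}()

	var collected []string
outerLoop:
	for {
		select {
		case r := <-results:
			collected = append(collected, r)
		case <-ctx.Done(): // deadline reached; stop collecting
			break outerLoop
		}
	}
	fmt.Println(collected)
}
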
73 cmd/promtool/sd_test.go Normal file
@ -0,0 +1,73 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"testing"
	"time"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"

	"github.com/stretchr/testify/require"
)

func TestSDCheckResult(t *testing.T) {
	targetGroups := []*targetgroup.Group{{
		Targets: []model.LabelSet{
			map[model.LabelName]model.LabelValue{"__address__": "localhost:8080", "foo": "bar"},
		},
	}}

	reg, err := relabel.NewRegexp("(.*)")
	require.Nil(t, err)

	scrapeConfig := &config.ScrapeConfig{
		ScrapeInterval: model.Duration(1 * time.Minute),
		ScrapeTimeout:  model.Duration(10 * time.Second),
		RelabelConfigs: []*relabel.Config{{
			SourceLabels: model.LabelNames{"foo"},
			Action:       relabel.Replace,
			TargetLabel:  "newfoo",
			Regex:        reg,
			Replacement:  "$1",
		}},
	}

	expectedSDCheckResult := []sdCheckResult{
		{
			DiscoveredLabels: labels.FromStrings(
				"__address__", "localhost:8080",
				"__scrape_interval__", "1m",
				"__scrape_timeout__", "10s",
				"foo", "bar",
			),
			Labels: labels.FromStrings(
				"__address__", "localhost:8080",
				"__scrape_interval__", "1m",
				"__scrape_timeout__", "10s",
				"foo", "bar",
				"instance", "localhost:8080",
				"newfoo", "bar",
			),
			Error: nil,
		},
	}

	require.Equal(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig, true))
}

7 cmd/promtool/testdata/at-modifier-test.yml vendored Normal file
@ -0,0 +1,7 @@
rule_files:
  - at-modifier.yml

tests:
  - input_series:
      - series: "requests{}"
        values: 1

7 cmd/promtool/testdata/at-modifier.yml vendored Normal file
@ -0,0 +1,7 @@
# This is the rules file for at-modifier-test.yml.

groups:
  - name: at-modifier
    rules:
      - record: x
        expr: "requests @ 1000"

4 cmd/promtool/testdata/authorization_credentials_file.bad.yml vendored Normal file
@ -0,0 +1,4 @@
scrape_configs:
  - job_name: test
    authorization:
      credentials_file: "/random/file/which/does/not/exist.yml"

4 cmd/promtool/testdata/authorization_credentials_file.good.yml vendored Normal file
@ -0,0 +1,4 @@
scrape_configs:
  - job_name: test
    authorization:
      credentials_file: "."

4 cmd/promtool/testdata/bad-input-series.yml vendored Normal file
@ -0,0 +1,4 @@
tests:
  - input_series:
      - series: 'up{job="prometheus", instance="localhost:9090"'
        values: "0+0x1440"

12 cmd/promtool/testdata/bad-promql.yml vendored Normal file
@ -0,0 +1,12 @@
tests:
  - input_series:
      - series: 'join_1{a="1",b="2"}'
        values: 1
      - series: 'join_2{a="1",b="3"}'
        values: 2
      - series: 'join_2{a="1",b="4"}'
        values: 3

    promql_expr_test:
      # This PromQL generates an error.
      - expr: "join_1 + on(a) join_2"

14 cmd/promtool/testdata/bad-rules-error-test.yml vendored Normal file
@ -0,0 +1,14 @@
rule_files:
  - bad-rules-error.yml

tests:
  - input_series:
      - series: 'join_1{a="1",b="2"}'
        values: 1
      - series: 'join_2{a="1",b="3"}'
        values: 2
      - series: 'join_2{a="1",b="4"}'
        values: 3

    # Just the existence of data that can't be joined makes the
    # recording rules error.

7 cmd/promtool/testdata/bad-rules-error.yml vendored Normal file
@ -0,0 +1,7 @@
# This is the rules file for bad-rules-error-test.yml.

groups:
  - name: bad-example
    rules:
      - record: joined
        expr: join_1 + on(a) join_2

6 cmd/promtool/testdata/bad-rules-syntax-test.yml vendored Normal file
@ -0,0 +1,6 @@
rule_files:
  - bad-rules-syntax.yml

tests:
  # Need a test to ensure the recording rules actually run.
  - {}

7 cmd/promtool/testdata/bad-rules-syntax.yml vendored Normal file
@ -0,0 +1,7 @@
# This is the rules file for bad-rules-syntax-test.yml.

groups:
  - name: bad-syntax
    rules:
      - record: x
        expr: 'test +'

2 cmd/promtool/testdata/bad-sd-file-format.yml vendored Normal file
@ -0,0 +1,2 @@
- targats:
    - localhost:9100
3 cmd/promtool/testdata/config_with_rule_files.yml vendored Normal file
@ -0,0 +1,3 @@
rule_files:
  - non-existent-file.yml
  - /etc/non/existent/file.yml

12 cmd/promtool/testdata/config_with_service_discovery_files.yml vendored Normal file
@ -0,0 +1,12 @@
scrape_configs:
  - job_name: prometheus
    file_sd_configs:
      - files:
          - nonexistent_file.yml
alerting:
  alertmanagers:
    - scheme: http
      api_version: v1
      file_sd_configs:
        - files:
            - nonexistent_file.yml

5 cmd/promtool/testdata/config_with_tls_files.yml vendored Normal file
@ -0,0 +1,5 @@
scrape_configs:
  - job_name: "some job"
    tls_config:
      cert_file: nonexistent_cert_file.yml
      key_file: nonexistent_key_file.yml

38 cmd/promtool/testdata/failing.yml vendored Normal file
@ -0,0 +1,38 @@
rule_files:
  - rules.yml

tests:
  # Simple failing test, depends on no rules.
  - interval: 1m
    name: "Failing test"
    input_series:
      - series: test
        values: '0'

    promql_expr_test:
      - expr: test
        eval_time: 0m
        exp_samples:
          - value: 1
            labels: test

    alert_rule_test:
      - eval_time: 0m
        alertname: Test
        exp_alerts:
          - exp_labels: {}

  # Alerts firing, but no alert expected by the test.
  - interval: 1m
    name: Failing alert test
    input_series:
      - series: 'up{job="test"}'
        values: 0x10

    alert_rule_test:
      - eval_time: 5m
        alertname: InstanceDown
        exp_alerts: []
      - eval_time: 5m
        alertname: AlwaysFiring
        exp_alerts: []
8 cmd/promtool/testdata/good-sd-file.json vendored Normal file
@ -0,0 +1,8 @@
[
  {
    "labels": {
      "job": "node"
    },
    "targets": ["localhost:9100"]
  }
]

4 cmd/promtool/testdata/good-sd-file.yaml vendored Normal file
@ -0,0 +1,4 @@
- labels:
    job: node
- targets:
    - localhost:9100

4 cmd/promtool/testdata/good-sd-file.yml vendored Normal file
@ -0,0 +1,4 @@
- labels:
    job: node
- targets:
    - localhost:9100

34 cmd/promtool/testdata/long-period.yml vendored Normal file
@ -0,0 +1,34 @@
# Evaluate once every 100d to avoid this taking too long.
evaluation_interval: 100d

rule_files:
  - rules.yml

tests:
  - interval: 100d
    input_series:
      - series: test
        # Max time in time.Duration is 106751d from 1970 (2^63/10^9), i.e. 2262.
        # We use the nearest 100 days to that to ensure the unit tests can fully
        # cover the expected range.
        values: '0+1x1067'

    promql_expr_test:
      - expr: timestamp(test)
        eval_time: 0m
        exp_samples:
          - value: 0
      - expr: test
        eval_time: 100d  # one evaluation_interval.
        exp_samples:
          - labels: test
            value: 1
      - expr: timestamp(test)
        eval_time: 106700d
        exp_samples:
          - value: 9218880000  # 106700d -> seconds.
      - expr: fixed_data
        eval_time: 106700d
        exp_samples:
          - labels: fixed_data
            value: 1
35 cmd/promtool/testdata/metrics-test.prom vendored Normal file
@ -0,0 +1,35 @@
# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 2.391e-05
go_gc_duration_seconds{quantile="0.25"} 9.4402e-05
go_gc_duration_seconds{quantile="0.5"} 0.000118953
go_gc_duration_seconds{quantile="0.75"} 0.000145884
go_gc_duration_seconds{quantile="1"} 0.005201208
go_gc_duration_seconds_sum 0.036134048
go_gc_duration_seconds_count 232
# HELP prometheus_tsdb_compaction_chunk_size_bytes Final size of chunks on their first compaction
# TYPE prometheus_tsdb_compaction_chunk_size_bytes histogram
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="32"} 662
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="48"} 1460
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="72"} 2266
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="108"} 3958
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="162"} 4861
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="243"} 5721
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="364.5"} 10493
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="546.75"} 12464
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="820.125"} 13254
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1230.1875"} 13699
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1845.28125"} 13806
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="2767.921875"} 13852
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="+Inf"} 13867
prometheus_tsdb_compaction_chunk_size_bytes_sum 3.886707e+06
prometheus_tsdb_compaction_chunk_size_bytes_count 13867
# HELP net_conntrack_dialer_conn_attempted_total Total number of connections attempted by the given dialer a given name.
# TYPE net_conntrack_dialer_conn_attempted_total counter
net_conntrack_dialer_conn_attempted_total{dialer_name="blackbox"} 5210
net_conntrack_dialer_conn_attempted_total{dialer_name="default"} 0
net_conntrack_dialer_conn_attempted_total{dialer_name="node"} 21
net_conntrack_dialer_conn_attempted_total{dialer_name="prometheus"} 21
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
go_info{version="go1.17"} 1

7 cmd/promtool/testdata/negative-offset-test.yml vendored Normal file
@ -0,0 +1,7 @@
rule_files:
  - negative-offset.yml

tests:
  - input_series:
      - series: "requests{}"
        values: 1

7 cmd/promtool/testdata/negative-offset.yml vendored Normal file
@ -0,0 +1,7 @@
# This is the rules file for negative-offset-test.yml.

groups:
  - name: negative-offset
    rules:
      - record: x
        expr: "requests offset -5m"

1 cmd/promtool/testdata/prometheus-config.bad.yml vendored Normal file
@ -0,0 +1 @@
not-prometheus:
2 cmd/promtool/testdata/prometheus-config.lint.yml vendored Normal file
@ -0,0 +1,2 @@
rule_files:
  - prometheus-rules.lint.yml

17 cmd/promtool/testdata/prometheus-rules.lint.yml vendored Normal file
@ -0,0 +1,17 @@
groups:
  - name: example
    rules:
      - alert: HighRequestLatency
        expr: job:request_latency_seconds:mean5m{job="myjob"} > 0.5
        for: 10m
        labels:
          severity: page
        annotations:
          summary: High request latency
      - alert: HighRequestLatency
        expr: job:request_latency_seconds:mean5m{job="myjob"} > 0.5
        for: 10m
        labels:
          severity: page
        annotations:
          summary: High request latency

28 cmd/promtool/testdata/rules-bad.yml vendored Normal file
@ -0,0 +1,28 @@
# This is the rules file.

groups:
  - name: alerts
    rules:
      - alert: InstanceDown
        expr: up == 0
        for: 5m
        labels:
          severity: page
        annotations:
          summary: "Instance {{ $label.foo }} down"
          description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes."
      - alert: AlwaysFiring
        expr: 1

  - name: rules
    rules:
      - record: job:test:count_over_time1m
        expr: sum without(instance) (count_over_time(test[1m]))

      # A recording rule that doesn't depend on input series.
      - record: fixed_data
        expr: 1

      # Subquery with default resolution test.
      - record: subquery_interval_test
        expr: count_over_time(up[5m:])

28 cmd/promtool/testdata/rules.yml vendored Normal file
@ -0,0 +1,28 @@
# This is the rules file.

groups:
  - name: alerts
    rules:
      - alert: InstanceDown
        expr: up == 0
        for: 5m
        labels:
          severity: page
        annotations:
          summary: "Instance {{ $labels.instance }} down"
          description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes."
      - alert: AlwaysFiring
        expr: 1

  - name: rules
    rules:
      - record: job:test:count_over_time1m
        expr: sum without(instance) (count_over_time(test[1m]))

      # A recording rule that doesn't depend on input series.
      - record: fixed_data
        expr: 1

      # Subquery with default resolution test.
      - record: subquery_interval_test
        expr: count_over_time(up[5m:])
24 cmd/promtool/testdata/rules_duplicates.yml vendored Normal file
@ -0,0 +1,24 @@
# This is a rules file with duplicate expressions.

groups:
  - name: base
    rules:
      - record: job:test:count_over_time1m
        expr: sum without(instance) (count_over_time(test[1m]))

      # A recording rule that doesn't depend on input series.
      - record: fixed_data
        expr: 1

      # Subquery with default resolution test.
      - record: subquery_interval_test
        expr: count_over_time(up[5m:])

      # Duplicating
      - record: job:test:count_over_time1m
        expr: sum without(instance) (count_over_time(test[1m]))

  - name: duplicate
    rules:
      - record: job:test:count_over_time1m
        expr: sum without(instance) (count_over_time(test[1m]))

40011 cmd/promtool/testdata/rules_large.yml vendored Normal file
File diff suppressed because it is too large.
203
cmd/promtool/testdata/unittest.yml
vendored
Normal file
203
cmd/promtool/testdata/unittest.yml
vendored
Normal file
|
@ -0,0 +1,203 @@
|
||||||
|
rule_files:
  - rules.yml

evaluation_interval: 1m

tests:
  # Basic tests for promql_expr_test, not dependent on rules.
  - interval: 1m
    input_series:
      - series: test_full
        values: "0 0"

      - series: test_repeat
        values: "1x2"

      - series: test_increase
        values: "1+1x2"

      - series: test_histogram
        values: "{{schema:1 sum:-0.3 count:32.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}"

      - series: test_histogram_repeat
        values: "{{sum:3 count:2 buckets:[2]}}x2"

      - series: test_histogram_increase
        values: "{{sum:3 count:2 buckets:[2]}}+{{sum:1.3 count:1 buckets:[1]}}x2"

      - series: test_stale
        values: "0 stale"

      - series: test_missing
        values: "0 _ _ _ _ _ _ 0"

    promql_expr_test:
      # Ensure the sample is evaluated at the time we expect it to be.
      - expr: timestamp(test_full)
        eval_time: 0m
        exp_samples:
          - value: 0
      - expr: timestamp(test_full)
        eval_time: 1m
        exp_samples:
          - value: 60
      - expr: timestamp(test_full)
        eval_time: 2m
        exp_samples:
          - value: 60

      # Repeat & increase.
      - expr: test_repeat
        eval_time: 2m
        exp_samples:
          - value: 1
            labels: "test_repeat"
      - expr: test_increase
        eval_time: 2m
        exp_samples:
          - value: 3
            labels: "test_increase"

      # Histograms.
      - expr: test_histogram
        eval_time: 1m
        exp_samples:
          - labels: "test_histogram"
            histogram: "{{schema:1 sum:-0.3 count:32.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}"

      - expr: test_histogram_repeat
        eval_time: 2m
        exp_samples:
          - labels: "test_histogram_repeat"
            histogram: "{{count:2 sum:3 buckets:[2]}}"

      - expr: test_histogram_increase
        eval_time: 2m
        exp_samples:
          - labels: "test_histogram_increase"
            histogram: "{{count:4 sum:5.6 buckets:[4]}}"

      # Ensure a value is stale as soon as it is marked as such.
      - expr: test_stale
        eval_time: 59s
        exp_samples:
          - value: 0
            labels: "test_stale"
      - expr: test_stale
        eval_time: 1m
        exp_samples: []

      # Ensure the lookback delta is respected when a value is missing.
      - expr: timestamp(test_missing)
        eval_time: 5m
        exp_samples:
          - value: 0
      - expr: timestamp(test_missing)
        eval_time: 5m1s
        exp_samples: []

  # Minimal test case to check edge case of a single sample.
  - input_series:
      - series: test
        values: 1

    promql_expr_test:
      - expr: test
        eval_time: 0
        exp_samples:
          - value: 1
            labels: test

  # Test recording rules run even if input_series isn't provided.
  - promql_expr_test:
      - expr: count_over_time(fixed_data[1h])
        eval_time: 1h
        exp_samples:
          - value: 61
      - expr: timestamp(fixed_data)
        eval_time: 1h
        exp_samples:
          - value: 3600

  # Tests for alerting rules.
  - interval: 1m
    input_series:
      - series: 'up{job="prometheus", instance="localhost:9090"}'
        values: "0+0x1440"

    promql_expr_test:
      - expr: count(ALERTS) by (alertname, alertstate)
        eval_time: 4m
        exp_samples:
          - labels: '{alertname="AlwaysFiring",alertstate="firing"}'
            value: 1
          - labels: '{alertname="InstanceDown",alertstate="pending"}'
            value: 1

    alert_rule_test:
      - eval_time: 1d
        alertname: AlwaysFiring
        exp_alerts:
          - {}

      - eval_time: 1d
        alertname: InstanceDown
        exp_alerts:
          - exp_labels:
              severity: page
              instance: localhost:9090
              job: prometheus
            exp_annotations:
              summary: "Instance localhost:9090 down"
              description: "localhost:9090 of job prometheus has been down for more than 5 minutes."

      - eval_time: 0
        alertname: AlwaysFiring
        exp_alerts:
          - {}

      - eval_time: 0
        alertname: InstanceDown
        exp_alerts: []

  # Tests for interval vs evaluation_interval.
  - interval: 1s
    input_series:
      - series: 'test{job="test", instance="x:0"}'
        # 2 minutes + 1 second of input data; recording rules should only run
        # once a minute.
        values: "0+1x120"

    promql_expr_test:
      - expr: job:test:count_over_time1m
        eval_time: 0m
        exp_samples:
          - value: 1
            labels: 'job:test:count_over_time1m{job="test"}'
      - expr: timestamp(job:test:count_over_time1m)
        eval_time: 10s
        exp_samples:
          - value: 0
            labels: '{job="test"}'

      - expr: job:test:count_over_time1m
        eval_time: 1m
        exp_samples:
          - value: 61
            labels: 'job:test:count_over_time1m{job="test"}'
      - expr: timestamp(job:test:count_over_time1m)
        eval_time: 1m10s
        exp_samples:
          - value: 60
            labels: '{job="test"}'

      - expr: job:test:count_over_time1m
        eval_time: 2m
        exp_samples:
          - value: 61
            labels: 'job:test:count_over_time1m{job="test"}'
      - expr: timestamp(job:test:count_over_time1m)
        eval_time: 2m59s999ms
        exp_samples:
          - value: 120
            labels: '{job="test"}'
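Two arithmetic notes on the expectations above, for readers checking them by hand: count_over_time(fixed_data[1h]) at eval_time 1h is 61 because, at the file-level evaluation_interval of 1m, the recording rule produces samples at 0m through 60m inclusive; likewise job:test:count_over_time1m is 61 at 1m because a [1m] range at a 1s scrape interval covers 61 samples. A minimal sketch of driving such a fixture from Go, assuming it lives inside the promtool main package where RulesUnitTest (changed later in this commit) is defined; the zero-valued LazyLoaderOpts is a placeholder for query-engine options:

	// Exit code 0 means every test file passed, matching the convention of
	// `promtool test rules <files...>`.
	func runRuleUnitTests() int {
		return RulesUnitTest(promql.LazyLoaderOpts{}, "cmd/promtool/testdata/unittest.yml")
	}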
 8  cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.bad.yml  (vendored, new file)
@@ -0,0 +1,8 @@
alerting:
  alertmanagers:
    - relabel_configs:
        - source_labels: [__address__]
          target_label: __param_target
      static_configs:
        - targets:
            - http://bad
 10  cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.good.yml  (vendored, new file)
@@ -0,0 +1,10 @@
alerting:
  alertmanagers:
    - relabel_configs:
        - source_labels: [__address__]
          target_label: __param_target
        - target_label: __address__
          replacement: good
      static_configs:
        - targets:
            - http://bad
 8  cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.bad.yml  (vendored, new file)
@@ -0,0 +1,8 @@
scrape_configs:
  - job_name: prometheus
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
    static_configs:
      - targets:
          - http://bad
 10  cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.good.yml  (vendored, new file)
@@ -0,0 +1,10 @@
scrape_configs:
  - job_name: prometheus
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - target_label: __address__
        replacement: good
    static_configs:
      - targets:
          - http://good
 734  cmd/promtool/tsdb.go  (new file)
@@ -0,0 +1,734 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"math"
	"os"
	"path/filepath"
	"runtime"
	"runtime/pprof"
	"strconv"
	"strings"
	"sync"
	"text/tabwriter"
	"time"

	"github.com/alecthomas/units"
	"github.com/go-kit/log"
	"golang.org/x/exp/slices"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/promql/parser"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
	"github.com/prometheus/prometheus/tsdb/fileutil"
	"github.com/prometheus/prometheus/tsdb/index"
)

const timeDelta = 30000

type writeBenchmark struct {
	outPath     string
	samplesFile string
	cleanup     bool
	numMetrics  int

	storage *tsdb.DB

	cpuprof   *os.File
	memprof   *os.File
	blockprof *os.File
	mtxprof   *os.File
	logger    log.Logger
}

func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) error {
	b := &writeBenchmark{
		outPath:     outPath,
		samplesFile: samplesFile,
		numMetrics:  numMetrics,
		logger:      log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)),
	}
	if b.outPath == "" {
		dir, err := os.MkdirTemp("", "tsdb_bench")
		if err != nil {
			return err
		}
		b.outPath = dir
		b.cleanup = true
	}
	if err := os.RemoveAll(b.outPath); err != nil {
		return err
	}
	if err := os.MkdirAll(b.outPath, 0o777); err != nil {
		return err
	}

	dir := filepath.Join(b.outPath, "storage")

	l := log.With(b.logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)

	st, err := tsdb.Open(dir, l, nil, &tsdb.Options{
		RetentionDuration: int64(15 * 24 * time.Hour / time.Millisecond),
		MinBlockDuration:  int64(2 * time.Hour / time.Millisecond),
	}, tsdb.NewDBStats())
	if err != nil {
		return err
	}
	st.DisableCompactions()
	b.storage = st

	var lbs []labels.Labels

	if _, err = measureTime("readData", func() error {
		f, err := os.Open(b.samplesFile)
		if err != nil {
			return err
		}
		defer f.Close()

		lbs, err = readPrometheusLabels(f, b.numMetrics)
		if err != nil {
			return err
		}
		return nil
	}); err != nil {
		return err
	}

	var total uint64

	dur, err := measureTime("ingestScrapes", func() error {
		if err := b.startProfiling(); err != nil {
			return err
		}
		total, err = b.ingestScrapes(lbs, numScrapes)
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		return err
	}

	fmt.Println(" > total samples:", total)
	fmt.Println(" > samples/sec:", float64(total)/dur.Seconds())

	if _, err = measureTime("stopStorage", func() error {
		if err := b.storage.Close(); err != nil {
			return err
		}

		return b.stopProfiling()
	}); err != nil {
		return err
	}

	return nil
}

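A hedged usage sketch for the benchmark entry point above; the path and counts are illustrative placeholders (promtool normally supplies them from the `tsdb bench write` sub-command's flags):

	// Illustrative only: read up to 1000 series from a label-set file and
	// append 3000 scrapes per series, spaced timeDelta (30s) apart, i.e.
	// roughly 25 hours of synthetic data.
	func runWriteBench() error {
		return benchmarkWrite("bench_out", "20kseries.json", 1000, 3000)
	}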
func (b *writeBenchmark) ingestScrapes(lbls []labels.Labels, scrapeCount int) (uint64, error) {
	var mu sync.Mutex
	var total uint64

	for i := 0; i < scrapeCount; i += 100 {
		var wg sync.WaitGroup
		lbls := lbls
		for len(lbls) > 0 {
			l := 1000
			if len(lbls) < 1000 {
				l = len(lbls)
			}
			batch := lbls[:l]
			lbls = lbls[l:]

			wg.Add(1)
			go func() {
				n, err := b.ingestScrapesShard(batch, 100, int64(timeDelta*i))
				if err != nil {
					// exitWithError(err)
					fmt.Println(" err", err)
				}
				mu.Lock()
				total += n
				mu.Unlock()
				wg.Done()
			}()
		}
		wg.Wait()
	}
	fmt.Println("ingestion completed")

	return total, nil
}

func (b *writeBenchmark) ingestScrapesShard(lbls []labels.Labels, scrapeCount int, baset int64) (uint64, error) {
	ts := baset

	type sample struct {
		labels labels.Labels
		value  int64
		ref    *storage.SeriesRef
	}

	scrape := make([]*sample, 0, len(lbls))

	for _, m := range lbls {
		scrape = append(scrape, &sample{
			labels: m,
			value:  123456789,
		})
	}
	total := uint64(0)

	for i := 0; i < scrapeCount; i++ {
		app := b.storage.Appender(context.TODO())
		ts += timeDelta

		for _, s := range scrape {
			s.value += 1000

			var ref storage.SeriesRef
			if s.ref != nil {
				ref = *s.ref
			}

			ref, err := app.Append(ref, s.labels, ts, float64(s.value))
			if err != nil {
				panic(err)
			}

			if s.ref == nil {
				s.ref = &ref
			}
			total++
		}
		if err := app.Commit(); err != nil {
			return total, err
		}
	}
	return total, nil
}

func (b *writeBenchmark) startProfiling() error {
	var err error

	// Start CPU profiling.
	b.cpuprof, err = os.Create(filepath.Join(b.outPath, "cpu.prof"))
	if err != nil {
		return fmt.Errorf("bench: could not create cpu profile: %w", err)
	}
	if err := pprof.StartCPUProfile(b.cpuprof); err != nil {
		return fmt.Errorf("bench: could not start CPU profile: %w", err)
	}

	// Start memory profiling.
	b.memprof, err = os.Create(filepath.Join(b.outPath, "mem.prof"))
	if err != nil {
		return fmt.Errorf("bench: could not create memory profile: %w", err)
	}
	runtime.MemProfileRate = 64 * 1024

	// Start block profiling.
	b.blockprof, err = os.Create(filepath.Join(b.outPath, "block.prof"))
	if err != nil {
		return fmt.Errorf("bench: could not create block profile: %w", err)
	}
	runtime.SetBlockProfileRate(20)

	b.mtxprof, err = os.Create(filepath.Join(b.outPath, "mutex.prof"))
	if err != nil {
		return fmt.Errorf("bench: could not create mutex profile: %w", err)
	}
	runtime.SetMutexProfileFraction(20)
	return nil
}

func (b *writeBenchmark) stopProfiling() error {
	if b.cpuprof != nil {
		pprof.StopCPUProfile()
		b.cpuprof.Close()
		b.cpuprof = nil
	}
	if b.memprof != nil {
		if err := pprof.Lookup("heap").WriteTo(b.memprof, 0); err != nil {
			return fmt.Errorf("error writing mem profile: %w", err)
		}
		b.memprof.Close()
		b.memprof = nil
	}
	if b.blockprof != nil {
		if err := pprof.Lookup("block").WriteTo(b.blockprof, 0); err != nil {
			return fmt.Errorf("error writing block profile: %w", err)
		}
		b.blockprof.Close()
		b.blockprof = nil
		runtime.SetBlockProfileRate(0)
	}
	if b.mtxprof != nil {
		if err := pprof.Lookup("mutex").WriteTo(b.mtxprof, 0); err != nil {
			return fmt.Errorf("error writing mutex profile: %w", err)
		}
		b.mtxprof.Close()
		b.mtxprof = nil
		runtime.SetMutexProfileFraction(0)
	}
	return nil
}

func measureTime(stage string, f func() error) (time.Duration, error) {
	fmt.Printf(">> start stage=%s\n", stage)
	start := time.Now()
	if err := f(); err != nil {
		return 0, err
	}

	fmt.Printf(">> completed stage=%s duration=%s\n", stage, time.Since(start))
	return time.Since(start), nil
}

func readPrometheusLabels(r io.Reader, n int) ([]labels.Labels, error) {
	scanner := bufio.NewScanner(r)

	var mets []labels.Labels
	hashes := map[uint64]struct{}{}
	i := 0

	for scanner.Scan() && i < n {
		m := make([]labels.Label, 0, 10)

		r := strings.NewReplacer("\"", "", "{", "", "}", "")
		s := r.Replace(scanner.Text())

		labelChunks := strings.Split(s, ",")
		for _, labelChunk := range labelChunks {
			split := strings.Split(labelChunk, ":")
			m = append(m, labels.Label{Name: split[0], Value: split[1]})
		}
		ml := labels.New(m...) // This sorts by name - order of the k/v labels matters, don't assume we'll always receive them already sorted.
		h := ml.Hash()
		if _, ok := hashes[h]; ok {
			continue
		}
		mets = append(mets, ml)
		hashes[h] = struct{}{}
		i++
	}
	return mets, nil
}

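The parser above is deliberately simple, which constrains the input format; a hypothetical line it handles, with the resulting caveats:

	// One series per line; quotes and braces are stripped, then the line is
	// split on "," and each chunk on ":", so a line like
	//
	//	{__name__:node_cpu_total,mode:idle,cpu:0}
	//
	// parses cleanly, while a value containing ":" (say instance:localhost:9090)
	// is silently truncated at the second colon, and any whitespace after a
	// comma ends up inside the next label name.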
func listBlocks(path string, humanReadable bool) error {
	db, err := tsdb.OpenDBReadOnly(path, nil)
	if err != nil {
		return err
	}
	defer func() {
		err = tsdb_errors.NewMulti(err, db.Close()).Err()
	}()
	blocks, err := db.Blocks()
	if err != nil {
		return err
	}
	printBlocks(blocks, true, humanReadable)
	return nil
}

func printBlocks(blocks []tsdb.BlockReader, writeHeader, humanReadable bool) {
	tw := tabwriter.NewWriter(os.Stdout, 13, 0, 2, ' ', 0)
	defer tw.Flush()

	if writeHeader {
		fmt.Fprintln(tw, "BLOCK ULID\tMIN TIME\tMAX TIME\tDURATION\tNUM SAMPLES\tNUM CHUNKS\tNUM SERIES\tSIZE")
	}

	for _, b := range blocks {
		meta := b.Meta()

		fmt.Fprintf(tw,
			"%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
			meta.ULID,
			getFormatedTime(meta.MinTime, humanReadable),
			getFormatedTime(meta.MaxTime, humanReadable),
			time.Duration(meta.MaxTime-meta.MinTime)*time.Millisecond,
			meta.Stats.NumSamples,
			meta.Stats.NumChunks,
			meta.Stats.NumSeries,
			getFormatedBytes(b.Size(), humanReadable),
		)
	}
}

func getFormatedTime(timestamp int64, humanReadable bool) string {
	if humanReadable {
		return time.Unix(timestamp/1000, 0).UTC().String()
	}
	return strconv.FormatInt(timestamp, 10)
}

func getFormatedBytes(bytes int64, humanReadable bool) string {
	if humanReadable {
		return units.Base2Bytes(bytes).String()
	}
	return strconv.FormatInt(bytes, 10)
}

func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error) {
	db, err := tsdb.OpenDBReadOnly(path, nil)
	if err != nil {
		return nil, nil, err
	}

	if blockID == "" {
		blockID, err = db.LastBlockID()
		if err != nil {
			return nil, nil, err
		}
	}

	b, err := db.Block(blockID)
	if err != nil {
		return nil, nil, err
	}

	return db, b, nil
}

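For orientation, printBlocks renders one tabwriter row per block; with humanReadable set, timestamps print as UTC strings and sizes via units.Base2Bytes, otherwise as raw integers (milliseconds and bytes). The row below is invented for illustration:

	// BLOCK ULID                  MIN TIME       MAX TIME       DURATION  NUM SAMPLES  NUM CHUNKS  NUM SERIES  SIZE
	// 01H9ZZEXAMPLEULID000000000  1693526400000  1693533600000  2h0m0s    240000       2000        1000        2097152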
func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExtended bool, matchers string) error {
	var (
		selectors []*labels.Matcher
		err       error
	)
	if len(matchers) > 0 {
		selectors, err = parser.ParseMetricSelector(matchers)
		if err != nil {
			return err
		}
	}
	db, block, err := openBlock(path, blockID)
	if err != nil {
		return err
	}
	defer func() {
		err = tsdb_errors.NewMulti(err, db.Close()).Err()
	}()

	meta := block.Meta()
	fmt.Printf("Block ID: %s\n", meta.ULID)
	// Presume 1ms resolution that Prometheus uses.
	fmt.Printf("Duration: %s\n", (time.Duration(meta.MaxTime-meta.MinTime) * 1e6).String())
	fmt.Printf("Total Series: %d\n", meta.Stats.NumSeries)
	if len(matchers) > 0 {
		fmt.Printf("Matcher: %s\n", matchers)
	}
	ir, err := block.Index()
	if err != nil {
		return err
	}
	defer ir.Close()

	allLabelNames, err := ir.LabelNames(ctx, selectors...)
	if err != nil {
		return err
	}
	fmt.Printf("Label names: %d\n", len(allLabelNames))

	type postingInfo struct {
		key    string
		metric uint64
	}
	postingInfos := []postingInfo{}

	printInfo := func(postingInfos []postingInfo) {
		slices.SortFunc(postingInfos, func(a, b postingInfo) int { return int(b.metric) - int(a.metric) })

		for i, pc := range postingInfos {
			if i >= limit {
				break
			}
			fmt.Printf("%d %s\n", pc.metric, pc.key)
		}
	}

	labelsUncovered := map[string]uint64{}
	labelpairsUncovered := map[string]uint64{}
	labelpairsCount := map[string]uint64{}
	entries := 0
	var (
		p    index.Postings
		refs []storage.SeriesRef
	)
	if len(matchers) > 0 {
		p, err = tsdb.PostingsForMatchers(ctx, ir, selectors...)
		if err != nil {
			return err
		}
		// Expand refs first and cache in memory.
		// So later we don't have to expand again.
		refs, err = index.ExpandPostings(p)
		if err != nil {
			return err
		}
		fmt.Printf("Matched series: %d\n", len(refs))
		p = index.NewListPostings(refs)
	} else {
		p, err = ir.Postings(ctx, "", "") // The special all key.
		if err != nil {
			return err
		}
	}

	chks := []chunks.Meta{}
	builder := labels.ScratchBuilder{}
	for p.Next() {
		if err = ir.Series(p.At(), &builder, &chks); err != nil {
			return err
		}
		// Amount of the block time range not covered by this series.
		uncovered := uint64(meta.MaxTime-meta.MinTime) - uint64(chks[len(chks)-1].MaxTime-chks[0].MinTime)
		builder.Labels().Range(func(lbl labels.Label) {
			key := lbl.Name + "=" + lbl.Value
			labelsUncovered[lbl.Name] += uncovered
			labelpairsUncovered[key] += uncovered
			labelpairsCount[key]++
			entries++
		})
	}
	if p.Err() != nil {
		return p.Err()
	}
	fmt.Printf("Postings (unique label pairs): %d\n", len(labelpairsUncovered))
	fmt.Printf("Postings entries (total label pairs): %d\n", entries)

	postingInfos = postingInfos[:0]
	for k, m := range labelpairsUncovered {
		postingInfos = append(postingInfos, postingInfo{k, uint64(float64(m) / float64(meta.MaxTime-meta.MinTime))})
	}

	fmt.Printf("\nLabel pairs most involved in churning:\n")
	printInfo(postingInfos)

	postingInfos = postingInfos[:0]
	for k, m := range labelsUncovered {
		postingInfos = append(postingInfos, postingInfo{k, uint64(float64(m) / float64(meta.MaxTime-meta.MinTime))})
	}

	fmt.Printf("\nLabel names most involved in churning:\n")
	printInfo(postingInfos)

	postingInfos = postingInfos[:0]
	for k, m := range labelpairsCount {
		postingInfos = append(postingInfos, postingInfo{k, m})
	}

	fmt.Printf("\nMost common label pairs:\n")
	printInfo(postingInfos)

	postingInfos = postingInfos[:0]
	for _, n := range allLabelNames {
		values, err := ir.SortedLabelValues(ctx, n, selectors...)
		if err != nil {
			return err
		}
		var cumulativeLength uint64
		for _, str := range values {
			cumulativeLength += uint64(len(str))
		}
		postingInfos = append(postingInfos, postingInfo{n, cumulativeLength})
	}

	fmt.Printf("\nLabel names with highest cumulative label value length:\n")
	printInfo(postingInfos)

	postingInfos = postingInfos[:0]
	for _, n := range allLabelNames {
		lv, err := ir.SortedLabelValues(ctx, n, selectors...)
		if err != nil {
			return err
		}
		postingInfos = append(postingInfos, postingInfo{n, uint64(len(lv))})
	}
	fmt.Printf("\nHighest cardinality labels:\n")
	printInfo(postingInfos)

	postingInfos = postingInfos[:0]
	lv, err := ir.SortedLabelValues(ctx, "__name__", selectors...)
	if err != nil {
		return err
	}
	for _, n := range lv {
		postings, err := ir.Postings(ctx, "__name__", n)
		if err != nil {
			return err
		}
		postings = index.Intersect(postings, index.NewListPostings(refs))
		count := 0
		for postings.Next() {
			count++
		}
		if postings.Err() != nil {
			return postings.Err()
		}
		postingInfos = append(postingInfos, postingInfo{n, uint64(count)})
	}
	fmt.Printf("\nHighest cardinality metric names:\n")
	printInfo(postingInfos)

	if runExtended {
		return analyzeCompaction(ctx, block, ir, selectors)
	}

	return nil
}

func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.IndexReader, matchers []*labels.Matcher) (err error) {
	var postingsr index.Postings
	if len(matchers) > 0 {
		postingsr, err = tsdb.PostingsForMatchers(ctx, indexr, matchers...)
	} else {
		n, v := index.AllPostingsKey()
		postingsr, err = indexr.Postings(ctx, n, v)
	}
	if err != nil {
		return err
	}

	chunkr, err := block.Chunks()
	if err != nil {
		return err
	}
	defer func() {
		err = tsdb_errors.NewMulti(err, chunkr.Close()).Err()
	}()

	const maxSamplesPerChunk = 120
	nBuckets := 10
	histogram := make([]int, nBuckets)
	totalChunks := 0
	var builder labels.ScratchBuilder
	for postingsr.Next() {
		var chks []chunks.Meta
		if err := indexr.Series(postingsr.At(), &builder, &chks); err != nil {
			return err
		}

		for _, chk := range chks {
			// Load the actual data of the chunk.
			chk, err := chunkr.Chunk(chk)
			if err != nil {
				return err
			}
			chunkSize := math.Min(float64(chk.NumSamples()), maxSamplesPerChunk)
			// Calculate the bucket for the chunk and increment it in the histogram.
			bucket := int(math.Ceil(float64(nBuckets)*chunkSize/maxSamplesPerChunk)) - 1
			histogram[bucket]++
			totalChunks++
		}
	}

	fmt.Printf("\nCompaction analysis:\n")
	fmt.Println("Fullness: Amount of samples in chunks (100% is 120 samples)")
	// Normalize absolute counts to percentages and print them out.
	for bucket, count := range histogram {
		percentage := 100.0 * count / totalChunks
		fmt.Printf("%7d%%: ", (bucket+1)*10)
		for j := 0; j < percentage; j++ {
			fmt.Printf("#")
		}
		fmt.Println()
	}

	return nil
}

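To make the fullness bucketing above concrete, a small self-contained check using the same constants as analyzeCompaction:

	// ceil(nBuckets*size/max) - 1 maps chunk sizes onto ten 10%-wide rows:
	// 1 sample -> bucket 0 (the 10% row), 60 -> bucket 4 (50%), 120 -> bucket 9 (100%).
	func fullnessBucketExamples() {
		const nBuckets, maxSamplesPerChunk = 10, 120
		for _, samples := range []float64{1, 60, 120} {
			bucket := int(math.Ceil(nBuckets*samples/maxSamplesPerChunk)) - 1
			fmt.Printf("%3.0f samples -> bucket %d (%d%% row)\n", samples, bucket, (bucket+1)*10)
		}
	}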
func dumpSamples(ctx context.Context, path string, mint, maxt int64, match string) (err error) {
	db, err := tsdb.OpenDBReadOnly(path, nil)
	if err != nil {
		return err
	}
	defer func() {
		err = tsdb_errors.NewMulti(err, db.Close()).Err()
	}()
	q, err := db.Querier(mint, maxt)
	if err != nil {
		return err
	}
	defer q.Close()

	matchers, err := parser.ParseMetricSelector(match)
	if err != nil {
		return err
	}
	ss := q.Select(ctx, false, nil, matchers...)

	for ss.Next() {
		series := ss.At()
		lbs := series.Labels()
		it := series.Iterator(nil)
		for it.Next() == chunkenc.ValFloat {
			ts, val := it.At()
			fmt.Printf("%s %g %d\n", lbs, val, ts)
		}
		for it.Next() == chunkenc.ValFloatHistogram {
			ts, fh := it.AtFloatHistogram()
			fmt.Printf("%s %s %d\n", lbs, fh.String(), ts)
		}
		for it.Next() == chunkenc.ValHistogram {
			ts, h := it.AtHistogram()
			fmt.Printf("%s %s %d\n", lbs, h.String(), ts)
		}
		if it.Err() != nil {
			return it.Err()
		}
	}

	if ws := ss.Warnings(); len(ws) > 0 {
		return tsdb_errors.NewMulti(ws.AsErrors()...).Err()
	}

	if ss.Err() != nil {
		return ss.Err()
	}
	return nil
}

func checkErr(err error) int {
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return 1
	}
	return 0
}

func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) int {
	inputFile, err := fileutil.OpenMmapFile(path)
	if err != nil {
		return checkErr(err)
	}
	defer inputFile.Close()

	if err := os.MkdirAll(outputDir, 0o777); err != nil {
		return checkErr(fmt.Errorf("create output dir: %w", err))
	}

	return checkErr(backfill(5000, inputFile.Bytes(), outputDir, humanReadable, quiet, maxBlockDuration))
}

cmd/promtool/unittest.go
@@ -15,8 +15,8 @@ package main
 
 import (
 	"context"
+	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"reflect"
@@ -25,26 +25,29 @@ import (
 	"strings"
 	"time"
 
-	"github.com/go-kit/kit/log"
-	"github.com/pkg/errors"
-	yaml "gopkg.in/yaml.v2"
+	"github.com/go-kit/log"
+	"github.com/prometheus/common/model"
+	"gopkg.in/yaml.v2"
 
-	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/rules"
 	"github.com/prometheus/prometheus/storage"
 )
 
 // RulesUnitTest does unit testing of rules based on the unit testing files provided.
 // More info about the file format can be found in the docs.
-func RulesUnitTest(files ...string) int {
+func RulesUnitTest(queryOpts promql.LazyLoaderOpts, files ...string) int {
 	failed := false
 
 	for _, f := range files {
-		if errs := ruleUnitTest(f); errs != nil {
+		if errs := ruleUnitTest(f, queryOpts); errs != nil {
 			fmt.Fprintln(os.Stderr, "  FAILED:")
 			for _, e := range errs {
 				fmt.Fprintln(os.Stderr, e.Error())
+				fmt.Println()
 			}
 			failed = true
 		} else {
@@ -53,15 +56,15 @@ func RulesUnitTest(files ...string) int {
 		fmt.Println()
 	}
 	if failed {
-		return 1
+		return failureExitCode
 	}
-	return 0
+	return successExitCode
 }
 
-func ruleUnitTest(filename string) []error {
+func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts) []error {
 	fmt.Println("Unit Testing: ", filename)
 
-	b, err := ioutil.ReadFile(filename)
+	b, err := os.ReadFile(filename)
 	if err != nil {
 		return []error{err}
 	}
@@ -75,22 +78,17 @@ func ruleUnitTest(filename string) []error {
 	}
 
 	if unitTestInp.EvaluationInterval == 0 {
-		unitTestInp.EvaluationInterval = 1 * time.Minute
+		unitTestInp.EvaluationInterval = model.Duration(1 * time.Minute)
 	}
 
-	// Bounds for evaluating the rules.
-	mint := time.Unix(0, 0)
-	maxd := unitTestInp.maxEvalTime()
-	maxt := mint.Add(maxd)
-	// Rounding off to nearest Eval time (> maxt).
-	maxt = maxt.Add(unitTestInp.EvaluationInterval / 2).Round(unitTestInp.EvaluationInterval)
+	evalInterval := time.Duration(unitTestInp.EvaluationInterval)
 
 	// Giving number for groups mentioned in the file for ordering.
 	// Lower number group should be evaluated before higher number group.
 	groupOrderMap := make(map[string]int)
 	for i, gn := range unitTestInp.GroupEvalOrder {
 		if _, ok := groupOrderMap[gn]; ok {
-			return []error{errors.Errorf("group name repeated in evaluation order: %s", gn)}
+			return []error{fmt.Errorf("group name repeated in evaluation order: %s", gn)}
 		}
 		groupOrderMap[gn] = i
 	}
@@ -98,8 +96,7 @@ func ruleUnitTest(filename string) []error {
 	// Testing.
 	var errs []error
 	for _, t := range unitTestInp.Tests {
-		ers := t.test(mint, maxt, unitTestInp.EvaluationInterval, groupOrderMap,
-			unitTestInp.RuleFiles...)
+		ers := t.test(evalInterval, groupOrderMap, queryOpts, unitTestInp.RuleFiles...)
 		if ers != nil {
 			errs = append(errs, ers...)
 		}
@@ -113,21 +110,10 @@ func ruleUnitTest(filename string) []error {
 
 // unitTestFile holds the contents of a single unit test file.
 type unitTestFile struct {
-	RuleFiles          []string      `yaml:"rule_files"`
-	EvaluationInterval time.Duration `yaml:"evaluation_interval,omitempty"`
-	GroupEvalOrder     []string      `yaml:"group_eval_order"`
-	Tests              []testGroup   `yaml:"tests"`
-}
-
-func (utf *unitTestFile) maxEvalTime() time.Duration {
-	var maxd time.Duration
-	for _, t := range utf.Tests {
-		d := t.maxEvalTime()
-		if d > maxd {
-			maxd = d
-		}
-	}
-	return maxd
+	RuleFiles          []string       `yaml:"rule_files"`
+	EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"`
+	GroupEvalOrder     []string       `yaml:"group_eval_order"`
+	Tests              []testGroup    `yaml:"tests"`
 }
 
 // resolveAndGlobFilepaths joins all relative paths in a configuration
@@ -145,7 +131,7 @@ func resolveAndGlobFilepaths(baseDir string, utf *unitTestFile) error {
 		if err != nil {
 			return err
 		}
-		if len(m) <= 0 {
+		if len(m) == 0 {
 			fmt.Fprintln(os.Stderr, "  WARNING: no file match pattern", rf)
 		}
 		globbedFiles = append(globbedFiles, m...)
@@ -156,21 +142,24 @@ func resolveAndGlobFilepaths(baseDir string, utf *unitTestFile) error {
 
 // testGroup is a group of input series and tests associated with it.
 type testGroup struct {
-	Interval        time.Duration    `yaml:"interval"`
+	Interval        model.Duration   `yaml:"interval"`
 	InputSeries     []series         `yaml:"input_series"`
 	AlertRuleTests  []alertTestCase  `yaml:"alert_rule_test,omitempty"`
 	PromqlExprTests []promqlTestCase `yaml:"promql_expr_test,omitempty"`
 	ExternalLabels  labels.Labels    `yaml:"external_labels,omitempty"`
+	ExternalURL     string           `yaml:"external_url,omitempty"`
+	TestGroupName   string           `yaml:"name,omitempty"`
 }
 
 // test performs the unit tests.
-func (tg *testGroup) test(mint, maxt time.Time, evalInterval time.Duration, groupOrderMap map[string]int, ruleFiles ...string) []error {
+func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promql.LazyLoaderOpts, ruleFiles ...string) []error {
 	// Setup testing suite.
-	suite, err := promql.NewLazyLoader(nil, tg.seriesLoadingString())
+	suite, err := promql.NewLazyLoader(nil, tg.seriesLoadingString(), queryOpts)
 	if err != nil {
 		return []error{err}
 	}
 	defer suite.Close()
+	suite.SubqueryInterval = evalInterval
 
 	// Load the rule files.
 	opts := &rules.ManagerOptions{
@@ -181,23 +170,34 @@ func (tg *testGroup) test(mint, maxt time.Time, evalInterval time.Duration, groupOrderMap map[string]int, ruleFiles ...string) []error {
 		Logger: log.NewNopLogger(),
 	}
 	m := rules.NewManager(opts)
-	groupsMap, ers := m.LoadGroups(tg.Interval, tg.ExternalLabels, ruleFiles...)
+	groupsMap, ers := m.LoadGroups(time.Duration(tg.Interval), tg.ExternalLabels, tg.ExternalURL, nil, ruleFiles...)
 	if ers != nil {
 		return ers
 	}
 	groups := orderedGroups(groupsMap, groupOrderMap)
 
+	// Bounds for evaluating the rules.
+	mint := time.Unix(0, 0).UTC()
+	maxt := mint.Add(tg.maxEvalTime())
+
 	// Pre-processing some data for testing alerts.
 	// All this preparation is so that we can test alerts as we evaluate the rules.
 	// This avoids storing them in memory, as the number of evals might be high.
 
 	// All the `eval_time` for which we have unit tests for alerts.
-	alertEvalTimesMap := map[time.Duration]struct{}{}
+	alertEvalTimesMap := map[model.Duration]struct{}{}
 	// Map of all the eval_time+alertname combination present in the unit tests.
-	alertsInTest := make(map[time.Duration]map[string]struct{})
+	alertsInTest := make(map[model.Duration]map[string]struct{})
 	// Map of all the unit tests for given eval_time.
-	alertTests := make(map[time.Duration][]alertTestCase)
+	alertTests := make(map[model.Duration][]alertTestCase)
 	for _, alert := range tg.AlertRuleTests {
+		if alert.Alertname == "" {
+			var testGroupLog string
+			if tg.TestGroupName != "" {
+				testGroupLog = fmt.Sprintf(" (in TestGroup %s)", tg.TestGroupName)
+			}
+			return []error{fmt.Errorf("an item under alert_rule_test misses required attribute alertname at eval_time %v%s", alert.EvalTime, testGroupLog)}
+		}
 		alertEvalTimesMap[alert.EvalTime] = struct{}{}
 
 		if _, ok := alertsInTest[alert.EvalTime]; !ok {
@@ -207,7 +207,7 @@ func (tg *testGroup) test(...) []error {
 
 		alertTests[alert.EvalTime] = append(alertTests[alert.EvalTime], alert)
 	}
-	alertEvalTimes := make([]time.Duration, 0, len(alertEvalTimesMap))
+	alertEvalTimes := make([]model.Duration, 0, len(alertEvalTimesMap))
 	for k := range alertEvalTimesMap {
 		alertEvalTimes = append(alertEvalTimes, k)
 	}
@@ -218,9 +218,20 @@ func (tg *testGroup) test(...) []error {
 	// Current index in alertEvalTimes what we are looking at.
 	curr := 0
 
+	for _, g := range groups {
+		for _, r := range g.Rules() {
+			if alertRule, ok := r.(*rules.AlertingRule); ok {
+				// Mark alerting rules as restored, to ensure the ALERTS timeseries is
+				// created when they run.
+				alertRule.SetRestored(true)
+			}
+		}
+	}
+
 	var errs []error
-	for ts := mint; ts.Before(maxt); ts = ts.Add(evalInterval) {
+	for ts := mint; ts.Before(maxt) || ts.Equal(maxt); ts = ts.Add(evalInterval) {
 		// Collects the alerts asked for unit testing.
+		var evalErrs []error
 		suite.WithSamplesTill(ts, func(err error) {
 			if err != nil {
 				errs = append(errs, err)
@@ -230,19 +241,22 @@ func (tg *testGroup) test(...) []error {
 				g.Eval(suite.Context(), ts)
 				for _, r := range g.Rules() {
 					if r.LastError() != nil {
-						errs = append(errs, errors.Errorf("    rule: %s, time: %s, err: %v",
-							r.Name(), ts.Sub(time.Unix(0, 0)), r.LastError()))
+						evalErrs = append(evalErrs, fmt.Errorf("    rule: %s, time: %s, err: %v",
+							r.Name(), ts.Sub(time.Unix(0, 0).UTC()), r.LastError()))
 					}
 				}
 			}
 		})
-		if len(errs) > 0 {
+		errs = append(errs, evalErrs...)
+		// Only end testing at this point if errors occurred evaluating above,
+		// rather than any test failures already collected in errs.
+		if len(evalErrs) > 0 {
 			return errs
 		}
 
 		for {
-			if !(curr < len(alertEvalTimes) && ts.Sub(mint) <= alertEvalTimes[curr] &&
-				alertEvalTimes[curr] < ts.Add(evalInterval).Sub(mint)) {
+			if !(curr < len(alertEvalTimes) && ts.Sub(mint) <= time.Duration(alertEvalTimes[curr]) &&
+				time.Duration(alertEvalTimes[curr]) < ts.Add(evalInterval).Sub(mint)) {
 				break
 			}
@@ -271,8 +285,8 @@ func (tg *testGroup) test(...) []error {
 				for _, a := range ar.ActiveAlerts() {
 					if a.State == rules.StateFiring {
 						alerts = append(alerts, labelAndAnnotation{
-							Labels:      append(labels.Labels{}, a.Labels...),
-							Annotations: append(labels.Labels{}, a.Annotations...),
+							Labels:      a.Labels.Copy(),
+							Annotations: a.Annotations.Copy(),
 						})
 					}
 				}
@@ -300,17 +314,18 @@ func (tg *testGroup) test(...) []error {
 				})
 			}
 
-			if gotAlerts.Len() != expAlerts.Len() {
-				errs = append(errs, errors.Errorf("    alertname:%s, time:%s, \n        exp:%#v, \n        got:%#v",
-					testcase.Alertname, testcase.EvalTime.String(), expAlerts.String(), gotAlerts.String()))
-			} else {
-				sort.Sort(gotAlerts)
-				sort.Sort(expAlerts)
-
-				if !reflect.DeepEqual(expAlerts, gotAlerts) {
-					errs = append(errs, errors.Errorf("    alertname:%s, time:%s, \n        exp:%#v, \n        got:%#v",
-						testcase.Alertname, testcase.EvalTime.String(), expAlerts.String(), gotAlerts.String()))
-				}
+			sort.Sort(gotAlerts)
+			sort.Sort(expAlerts)
+
+			if !reflect.DeepEqual(expAlerts, gotAlerts) {
+				var testName string
+				if tg.TestGroupName != "" {
+					testName = fmt.Sprintf("    name: %s,\n", tg.TestGroupName)
+				}
+				expString := indentLines(expAlerts.String(), "            ")
+				gotString := indentLines(gotAlerts.String(), "            ")
+				errs = append(errs, fmt.Errorf("%s    alertname: %s, time: %s, \n        exp:%v, \n        got:%v",
+					testName, testcase.Alertname, testcase.EvalTime.String(), expString, gotString))
 			}
 		}
@@ -321,10 +336,10 @@ func (tg *testGroup) test(...) []error {
 	// Checking promql expressions.
 Outer:
 	for _, testCase := range tg.PromqlExprTests {
-		got, err := query(suite.Context(), testCase.Expr, mint.Add(testCase.EvalTime),
+		got, err := query(suite.Context(), testCase.Expr, mint.Add(time.Duration(testCase.EvalTime)),
 			suite.QueryEngine(), suite.Queryable())
 		if err != nil {
-			errs = append(errs, errors.Errorf("    expr: %q, time: %s, err: %s", testCase.Expr,
+			errs = append(errs, fmt.Errorf("    expr: %q, time: %s, err: %s", testCase.Expr,
 				testCase.EvalTime.String(), err.Error()))
 			continue
 		}
@@ -332,23 +347,39 @@ Outer:
 		var gotSamples []parsedSample
 		for _, s := range got {
 			gotSamples = append(gotSamples, parsedSample{
 				Labels:    s.Metric.Copy(),
-				Value:     s.V,
+				Value:     s.F,
+				Histogram: promql.HistogramTestExpression(s.H),
 			})
 		}
 
 		var expSamples []parsedSample
 		for _, s := range testCase.ExpSamples {
-			lb, err := promql.ParseMetric(s.Labels)
+			lb, err := parser.ParseMetric(s.Labels)
+			var hist *histogram.FloatHistogram
+			if err == nil && s.Histogram != "" {
+				_, values, parseErr := parser.ParseSeriesDesc("{} " + s.Histogram)
+				switch {
+				case parseErr != nil:
+					err = parseErr
+				case len(values) != 1:
+					err = fmt.Errorf("expected 1 value, got %d", len(values))
+				case values[0].Histogram == nil:
+					err = fmt.Errorf("expected histogram, got %v", values[0])
+				default:
+					hist = values[0].Histogram
+				}
+			}
 			if err != nil {
-				err = errors.Wrapf(err, "labels %q", s.Labels)
-				errs = append(errs, errors.Errorf("    expr: %q, time: %s, err: %s", testCase.Expr,
-					testCase.EvalTime.String(), err.Error()))
+				err = fmt.Errorf("labels %q: %w", s.Labels, err)
+				errs = append(errs, fmt.Errorf("    expr: %q, time: %s, err: %w", testCase.Expr,
+					testCase.EvalTime.String(), err))
 				continue Outer
 			}
 			expSamples = append(expSamples, parsedSample{
-				Labels: lb,
-				Value:  s.Value,
+				Labels:    lb,
+				Value:     s.Value,
+				Histogram: promql.HistogramTestExpression(hist),
 			})
 		}
@@ -359,7 +390,7 @@ Outer:
 			return labels.Compare(gotSamples[i].Labels, gotSamples[j].Labels) <= 0
 		})
 		if !reflect.DeepEqual(expSamples, gotSamples) {
-			errs = append(errs, errors.Errorf("    expr: %q, time: %s,\n        exp:%#v\n        got:%#v", testCase.Expr,
+			errs = append(errs, fmt.Errorf("    expr: %q, time: %s,\n        exp: %v\n        got: %v", testCase.Expr,
 				testCase.EvalTime.String(), parsedSamplesString(expSamples), parsedSamplesString(gotSamples)))
 		}
 	}
@@ -372,15 +403,14 @@ Outer:
 
 // seriesLoadingString returns the input series in PromQL notation.
 func (tg *testGroup) seriesLoadingString() string {
-	result := ""
-	result += "load " + shortDuration(tg.Interval) + "\n"
+	result := fmt.Sprintf("load %v\n", shortDuration(tg.Interval))
 	for _, is := range tg.InputSeries {
-		result += "  " + is.Series + " " + is.Values + "\n"
+		result += fmt.Sprintf("  %v %v\n", is.Series, is.Values)
 	}
 	return result
 }
 
-func shortDuration(d time.Duration) string {
+func shortDuration(d model.Duration) string {
 	s := d.String()
 	if strings.HasSuffix(s, "m0s") {
 		s = s[:len(s)-2]
@@ -406,7 +436,7 @@ func orderedGroups(groupsMap map[string]*rules.Group, groupOrderMap map[string]int) []*rules.Group {
 
 // maxEvalTime returns the max eval time among all alert and promql unit tests.
 func (tg *testGroup) maxEvalTime() time.Duration {
-	var maxd time.Duration
+	var maxd model.Duration
 	for _, alert := range tg.AlertRuleTests {
 		if alert.EvalTime > maxd {
 			maxd = alert.EvalTime
@@ -417,11 +447,11 @@ func (tg *testGroup) maxEvalTime() time.Duration {
 			maxd = pet.EvalTime
 		}
 	}
-	return maxd
+	return time.Duration(maxd)
 }
 
 func query(ctx context.Context, qs string, t time.Time, engine *promql.Engine, qu storage.Queryable) (promql.Vector, error) {
-	q, err := engine.NewInstantQuery(qu, qs, t)
+	q, err := engine.NewInstantQuery(ctx, qu, nil, qs, t)
 	if err != nil {
 		return nil, err
 	}
@@ -434,7 +464,8 @@ func query(ctx context.Context, qs string, t time.Time, engine *promql.Engine, qu storage.Queryable) (promql.Vector, error) {
 		return v, nil
 	case promql.Scalar:
 		return promql.Vector{promql.Sample{
-			Point:  promql.Point(v),
+			T:      v.T,
+			F:      v.V,
 			Metric: labels.Labels{},
 		}}, nil
 	default:
@@ -442,6 +473,23 @@ func query(ctx context.Context, qs string, t time.Time, engine *promql.Engine, qu storage.Queryable) (promql.Vector, error) {
 	}
 }
 
+// indentLines prefixes each line in the supplied string with the given "indent"
+// string.
+func indentLines(lines, indent string) string {
+	sb := strings.Builder{}
+	n := strings.Split(lines, "\n")
+	for i, l := range n {
+		if i > 0 {
+			sb.WriteString(indent)
+		}
+		sb.WriteString(l)
+		if i != len(n)-1 {
+			sb.WriteRune('\n')
+		}
+	}
+	return sb.String()
+}
+
 type labelsAndAnnotations []labelAndAnnotation
 
 func (la labelsAndAnnotations) Len() int { return len(la) }
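The new indentLines helper only prefixes lines after the first, which is why its callers prepend "\n" when the first line should be indented as well; a quick worked example:

	// indentLines("\n"+"Labels:{}\nAnnotations:{}", "  ")
	//   -> "\n  Labels:{}\n  Annotations:{}"
	// Every line after the leading newline gains the two-space indent, and no
	// trailing newline is appended.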
@ -458,11 +506,11 @@ func (la labelsAndAnnotations) String() string {
|
||||||
if len(la) == 0 {
|
if len(la) == 0 {
|
||||||
return "[]"
|
return "[]"
|
||||||
}
|
}
|
||||||
s := "[" + la[0].String()
|
s := "[\n0:" + indentLines("\n"+la[0].String(), " ")
|
||||||
for _, l := range la[1:] {
|
for i, l := range la[1:] {
|
||||||
s += ", " + l.String()
|
s += ",\n" + fmt.Sprintf("%d", i+1) + ":" + indentLines("\n"+l.String(), " ")
|
||||||
}
|
}
|
||||||
s += "]"
|
s += "\n]"
|
||||||
|
|
||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
@ -473,7 +521,7 @@ type labelAndAnnotation struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (la *labelAndAnnotation) String() string {
|
func (la *labelAndAnnotation) String() string {
|
||||||
return "Labels:" + la.Labels.String() + " Annotations:" + la.Annotations.String()
|
return "Labels:" + la.Labels.String() + "\nAnnotations:" + la.Annotations.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
type series struct {
|
type series struct {
|
||||||
|
@ -482,9 +530,9 @@ type series struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
type alertTestCase struct {
|
type alertTestCase struct {
|
||||||
EvalTime time.Duration `yaml:"eval_time"`
|
EvalTime model.Duration `yaml:"eval_time"`
|
||||||
Alertname string `yaml:"alertname"`
|
Alertname string `yaml:"alertname"`
|
||||||
ExpAlerts []alert `yaml:"exp_alerts"`
|
ExpAlerts []alert `yaml:"exp_alerts"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type alert struct {
|
type alert struct {
|
||||||
|
@@ -493,20 +541,22 @@ type alert struct {
 }
 
 type promqlTestCase struct {
     Expr       string         `yaml:"expr"`
-    EvalTime   time.Duration  `yaml:"eval_time"`
+    EvalTime   model.Duration `yaml:"eval_time"`
     ExpSamples []sample       `yaml:"exp_samples"`
 }
 
 type sample struct {
     Labels    string  `yaml:"labels"`
     Value     float64 `yaml:"value"`
+    Histogram string  `yaml:"histogram"` // A non-empty string means Value is ignored.
 }
 
 // parsedSample is a sample with parsed Labels.
 type parsedSample struct {
     Labels    labels.Labels
     Value     float64
+    Histogram string // TestExpression() of histogram.FloatHistogram
 }
 
 func parsedSamplesString(pss []parsedSample) string {
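
The time.Duration to model.Duration switch in the eval_time fields is about YAML parsing: model.Duration (from github.com/prometheus/common/model) implements yaml unmarshalling for Prometheus-style duration strings such as "5m", which plain time.Duration does not. A minimal sketch, assuming the common and yaml.v2 modules are available:

    package main

    import (
        "fmt"
        "time"

        "github.com/prometheus/common/model"
        "gopkg.in/yaml.v2"
    )

    type testCase struct {
        EvalTime model.Duration `yaml:"eval_time"`
    }

    func main() {
        var tc testCase
        // "5m" would fail to unmarshal into a plain time.Duration field.
        if err := yaml.Unmarshal([]byte("eval_time: 5m"), &tc); err != nil {
            panic(err)
        }
        // model.Duration converts cleanly to time.Duration for arithmetic.
        fmt.Println(time.Duration(tc.EvalTime)) // 5m0s
    }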
@@ -521,5 +571,8 @@ func parsedSamplesString(pss []parsedSample) string {
 }
 
 func (ps *parsedSample) String() string {
+    if ps.Histogram != "" {
+        return ps.Labels.String() + " " + ps.Histogram
+    }
     return ps.Labels.String() + " " + strconv.FormatFloat(ps.Value, 'E', -1, 64)
 }
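
To make the precedence in the new String() concrete: a non-empty Histogram string wins over Value. A trimmed-down, self-contained sketch (labels reduced to a plain string, and a made-up histogram test expression):

    package main

    import (
        "fmt"
        "strconv"
    )

    type parsedSample struct {
        Labels    string
        Value     float64
        Histogram string
    }

    func (ps *parsedSample) String() string {
        if ps.Histogram != "" {
            return ps.Labels + " " + ps.Histogram // histogram text takes precedence
        }
        return ps.Labels + " " + strconv.FormatFloat(ps.Value, 'E', -1, 64)
    }

    func main() {
        f := parsedSample{Labels: `{job="test"}`, Value: 1.5}
        h := parsedSample{Labels: `{job="test"}`, Histogram: `{{count:3 sum:10}}`}
        fmt.Println(f.String()) // {job="test"} 1.5E+00
        fmt.Println(h.String()) // {job="test"} {{count:3 sum:10}}
    }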
123  cmd/promtool/unittest_test.go  Normal file

@@ -0,0 +1,123 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+    "testing"
+
+    "github.com/prometheus/prometheus/promql"
+)
+
+func TestRulesUnitTest(t *testing.T) {
+    type args struct {
+        files []string
+    }
+    tests := []struct {
+        name      string
+        args      args
+        queryOpts promql.LazyLoaderOpts
+        want      int
+    }{
+        {
+            name: "Passing Unit Tests",
+            args: args{
+                files: []string{"./testdata/unittest.yml"},
+            },
+            want: 0,
+        },
+        {
+            name: "Long evaluation interval",
+            args: args{
+                files: []string{"./testdata/long-period.yml"},
+            },
+            want: 0,
+        },
+        {
+            name: "Bad input series",
+            args: args{
+                files: []string{"./testdata/bad-input-series.yml"},
+            },
+            want: 1,
+        },
+        {
+            name: "Bad PromQL",
+            args: args{
+                files: []string{"./testdata/bad-promql.yml"},
+            },
+            want: 1,
+        },
+        {
+            name: "Bad rules (syntax error)",
+            args: args{
+                files: []string{"./testdata/bad-rules-syntax-test.yml"},
+            },
+            want: 1,
+        },
+        {
+            name: "Bad rules (error evaluating)",
+            args: args{
+                files: []string{"./testdata/bad-rules-error-test.yml"},
+            },
+            want: 1,
+        },
+        {
+            name: "Simple failing test",
+            args: args{
+                files: []string{"./testdata/failing.yml"},
+            },
+            want: 1,
+        },
+        {
+            name: "Disabled feature (@ modifier)",
+            args: args{
+                files: []string{"./testdata/at-modifier-test.yml"},
+            },
+            want: 1,
+        },
+        {
+            name: "Enabled feature (@ modifier)",
+            args: args{
+                files: []string{"./testdata/at-modifier-test.yml"},
+            },
+            queryOpts: promql.LazyLoaderOpts{
+                EnableAtModifier: true,
+            },
+            want: 0,
+        },
+        {
+            name: "Disabled feature (negative offset)",
+            args: args{
+                files: []string{"./testdata/negative-offset-test.yml"},
+            },
+            want: 1,
+        },
+        {
+            name: "Enabled feature (negative offset)",
+            args: args{
+                files: []string{"./testdata/negative-offset-test.yml"},
+            },
+            queryOpts: promql.LazyLoaderOpts{
+                EnableNegativeOffset: true,
+            },
+            want: 0,
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            if got := RulesUnitTest(tt.queryOpts, tt.args.files...); got != tt.want {
+                t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want)
+            }
+        })
+    }
+}
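
The want values in the table mirror the fact that RulesUnitTest returns a process exit code: 0 when every rule unit test passes, 1 when anything fails (including unparsable input). A rough sketch, not actual promtool source, of how a caller could wire that result up, assuming it lives in cmd/promtool's package main where RulesUnitTest is defined with the signature the test exercises:

    // Illustrative only; runRuleTests is a hypothetical helper.
    package main

    import (
        "os"

        "github.com/prometheus/prometheus/promql"
    )

    func runRuleTests(files ...string) {
        // The return value is already an exit code (0 pass, 1 fail), so it
        // can feed os.Exit directly; LazyLoaderOpts toggles optional PromQL
        // features such as the @ modifier during evaluation.
        os.Exit(RulesUnitTest(promql.LazyLoaderOpts{}, files...))
    }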
@@ -1,3 +0,0 @@
-## Prometheus Community Code of Conduct
-
-Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
788  config/config.go
File diff suppressed because it is too large
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build !windows
 // +build !windows
 
 package config
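
The added //go:build line is the constraint syntax introduced in Go 1.17; the older // +build comment stays alongside it so pre-1.17 toolchains still skip the file on Windows, and gofmt keeps the two lines in sync. The header of such a dual-constraint file looks like:

    //go:build !windows
    // +build !windows

    // This file is compiled on every platform except Windows; the Windows
    // counterpart (with backslash paths) appears further down in this diff.
    package config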
@@ -24,5 +25,4 @@ var ruleFilesExpectedConf = &Config{
         "testdata/rules/second.rules",
         "/absolute/third.rules",
     },
-    original: "",
 }
File diff suppressed because it is too large
@@ -22,5 +22,4 @@ var ruleFilesExpectedConf = &Config{
         "testdata\\rules\\second.rules",
         "c:\\absolute\\third.rules",
     },
-    original: "",
 }
2  config/testdata/agent_mode.good.yml  vendored  Normal file

@@ -0,0 +1,2 @@
+remote_write:
+  - url: http://remote1/push
6  config/testdata/agent_mode.with_alert_manager.yml  vendored  Normal file

@@ -0,0 +1,6 @@
+alerting:
+  alertmanagers:
+    - scheme: https
+      static_configs:
+        - targets:
+            - "1.2.3.4:9093"
Some files were not shown because too many files have changed in this diff.