Merge branch 'main' into sparsehistogram

Author: beorn7
Date:   2022-05-04 13:37:13 +02:00
Commit: 3bc711e333

135 changed files with 1448 additions and 687 deletions

.circleci/config.yml

@@ -58,10 +58,13 @@ jobs:
           keys:
           - v3-npm-deps-{{ checksum "web/ui/package-lock.json" }}
           - v3-npm-deps-
-      - run: make ui-install
+      - run: make assets-tarball
       - run: make ui-lint
-      - run: make ui-build-module
       - run: make ui-test
+      - persist_to_workspace:
+          root: .
+          paths:
+            - .tarballs
       - save_cache:
           key: v3-npm-deps-{{ checksum "web/ui/package-lock.json" }}
           paths:
@@ -145,7 +148,7 @@ workflows:
           promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"
           filters:
             tags:
-              ignore: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
+              ignore: /^v2(\.[0-9]+){2}(-.+|[^-.]*)$/
             branches:
               ignore: /^(main|release-.*|.*build-all.*)$/
       - prometheus/build:
@@ -155,7 +158,7 @@ workflows:
           branches:
             only: /^(main|release-.*|.*build-all.*)$/
           tags:
-            only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
+            only: /^v2(\.[0-9]+){2}(-.+|[^-.]*)$/
       - prometheus/publish_main:
           context: org-context
           requires:
@@ -174,7 +177,7 @@ workflows:
             - build_all
           filters:
             tags:
-              only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
+              only: /^v2(\.[0-9]+){2}(-.+|[^-.]*)$/
             branches:
               ignore: /.*/
           image: circleci/golang:1-node

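The tag-filter change above is easy to misread: the old pattern fired on any `vX.Y.Z` tag, while the new one only fires on `v2.*` tags, which keeps the `v0.y.z` Go-module tags introduced later in this commit from triggering release builds. A standalone sketch of that behaviour (the sample tags are illustrative, not from the commit):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The pre- and post-change tag filters from the workflow above.
	oldFilter := regexp.MustCompile(`^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$`)
	newFilter := regexp.MustCompile(`^v2(\.[0-9]+){2}(-.+|[^-.]*)$`)

	for _, tag := range []string{"v2.35.0", "v2.35.0-rc.0", "v0.35.0"} {
		fmt.Printf("%-13s old=%-5v new=%v\n", tag, oldFilter.MatchString(tag), newFilter.MatchString(tag))
	}
	// v2.35.0       old=true  new=true
	// v2.35.0-rc.0  old=true  new=true
	// v0.35.0       old=true  new=false   <- library tags no longer trigger builds
}
```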
.github/ISSUE_TEMPLATE/bug_report.md (deleted)

@@ -1,57 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve.
-title: ''
-assignees: ''
-
----
-
-<!--
-	Please do *NOT* ask support questions in Github issues.
-
-	If your issue is not a feature request or bug report use our
-	community support.
-
-	https://prometheus.io/community/
-
-	There is also commercial support available.
-
-	https://prometheus.io/support-training/
--->
-
-**What did you do?**
-
-**What did you expect to see?**
-
-**What did you see instead? Under which circumstances?**
-
-**Environment**
-
-* System information:
-
-	insert output of `uname -srm` here
-
-* Prometheus version:
-
-	insert output of `prometheus --version` here
-
-* Alertmanager version:
-
-	insert output of `alertmanager --version` here (if relevant to the issue)
-
-* Prometheus configuration file:
-
-```
-insert configuration here
-```
-
-* Alertmanager configuration file:
-
-```
-insert configuration here (if relevant to the issue)
-```
-
-* Logs:
-
-```
-insert Prometheus and Alertmanager logs relevant to the issue here
-```

.github/ISSUE_TEMPLATE/bug_report.yml (new file, 74 lines)

@@ -0,0 +1,74 @@
+---
+name: Bug report
+description: Create a report to help us improve.
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Thank you for opening a bug report for Prometheus.
+
+        Please do *NOT* ask support questions in Github issues.
+        If your issue is not a feature request or bug report use our [community support](https://prometheus.io/community/).
+        There is also [commercial support](https://prometheus.io/support-training/) available.
+  - type: textarea
+    attributes:
+      label: What did you do?
+      description: Please provide steps for us to reproduce this issue.
+    validations:
+      required: true
+  - type: textarea
+    attributes:
+      label: What did you expect to see?
+  - type: textarea
+    attributes:
+      label: What did you see instead? Under which circumstances?
+    validations:
+      required: true
+  - type: markdown
+    attributes:
+      value: |
+        ## Environment
+  - type: input
+    attributes:
+      label: System information
+      description: insert output of `uname -srm` here, or operating system version
+      placeholder: e.g. Linux 5.16.15 x86_64
+  - type: textarea
+    attributes:
+      label: Prometheus version
+      description: Insert output of `prometheus --version` here.
+      render: text
+      placeholder: |
+        e.g. prometheus, version 2.23.0 (branch: HEAD, revision: 26d89b4b0776fe4cd5a3656dfa520f119a375273)
+        build user: root@37609b3a0a21
+        build date: 20201126-10:56:17
+        go version: go1.15.5
+        platform: linux/amd64
+  - type: textarea
+    attributes:
+      label: Prometheus configuration file
+      description: Insert relevant configuration here. Don't forget to remove secrets.
+      render: yaml
+  - type: textarea
+    attributes:
+      label: Alertmanager version
+      description: Insert output of `alertmanager --version` here (if relevant to the issue).
+      render: text
+      placeholder: |
+        e.g. alertmanager, version 0.22.2 (branch: HEAD, revision: 44f8adc06af5101ad64bd8b9c8b18273f2922051)
+        build user: root@b595c7f32520
+        build date: 20210602-07:50:37
+        go version: go1.16.4
+        platform: linux/amd64
+  - type: textarea
+    attributes:
+      label: Alertmanager configuration file
+      description: Insert relevant configuration here. Don't forget to remove secrets.
+      render: yaml
+  - type: textarea
+    attributes:
+      label: Logs
+      description: Insert Prometheus and Alertmanager logs relevant to the issue here.
+      render: text

.github/workflows/buf-lint.yml

@@ -10,7 +10,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - uses: bufbuild/buf-setup-action@v1.3.1
+      - uses: bufbuild/buf-setup-action@v1.4.0
       - uses: bufbuild/buf-lint-action@v1
         with:
          input: 'prompb'

.github/workflows/buf.yml

@@ -9,7 +9,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
      - uses: actions/checkout@v3
-      - uses: bufbuild/buf-setup-action@v1.3.1
+      - uses: bufbuild/buf-setup-action@v1.4.0
      - uses: bufbuild/buf-lint-action@v1
        with:
          input: 'prompb'

.github/workflows/codeql-analysis.yml

@@ -1,21 +1,10 @@
-# For most projects, this workflow file will not need changing; you simply need
-# to commit it to your repository.
-#
-# You may wish to alter this file to override the set of languages analyzed,
-# or to provide custom queries or build logic.
-#
-# ******** NOTE ********
-# We have attempted to detect the languages in your repository. Please check
-# the `language` matrix defined below to confirm you have the correct set of
-# supported CodeQL languages.
-#
+---
 name: "CodeQL"

 on:
   push:
     branches: [main, release-*]
   pull_request:
-    # The branches below must be a subset of the branches above
     branches: [main]
   schedule:
     - cron: "26 14 * * 1"
@@ -24,44 +13,28 @@ jobs:
   analyze:
     name: Analyze
     runs-on: ubuntu-latest
+    permissions:
+      security-events: write
     strategy:
       fail-fast: false
       matrix:
         language: ["go", "javascript"]
-        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
-        # Learn more:
-        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed

     steps:
       - name: Checkout repository
         uses: actions/checkout@v3
+      - uses: actions/setup-go@v3
+        with:
+          go-version: '>=1.17 <1.18'

-      # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@v1
+        uses: github/codeql-action/init@v2
         with:
           languages: ${{ matrix.language }}
-          # If you wish to specify custom queries, you can do so here or in a config file.
-          # By default, queries listed here will override any specified in a config file.
-          # Prefix the list here with "+" to use these queries and those in the config file.
-          # queries: ./path/to/local/query, your-org/your-repo/queries@main

-      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
-      # If this step fails, then you should remove it and run the build manually (see below)
       - name: Autobuild
-        uses: github/codeql-action/autobuild@v1
+        uses: github/codeql-action/autobuild@v2

-      # Command-line programs to run using the OS shell.
-      # 📚 https://git.io/JvXDl
-      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
-      # and modify them (or add more) to build your code if your project
-      # uses a compiled language
-      #- run: |
-      #   make bootstrap
-      #   make release
       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v1
+        uses: github/codeql-action/analyze@v2

.golangci.yml

@@ -16,6 +16,7 @@ linters:
   - misspell

 issues:
+  max-same-issues: 0
   exclude-rules:
     - path: _test.go
       linters:
@@ -29,6 +30,7 @@ linters-settings:
       - sync/atomic: "Use go.uber.org/atomic instead of sync/atomic"
      - github.com/stretchr/testify/assert: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert"
      - github.com/go-kit/kit/log: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
+      - io/ioutil: "Use corresponding 'os' or 'io' functions instead."
      - regexp: "Use github.com/grafana/regexp instead of regexp"
   errcheck:
     exclude: scripts/errcheck_excludes.txt

.promu.yml

@@ -18,6 +18,8 @@ build:
         -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}}
         -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}}
 tarball:
+  # Whenever there are new files to include in the tarball,
+  # remember to make sure the new files will be generated after `make build`.
   files:
     - consoles
     - console_libraries

CHANGELOG.md

@@ -1,3 +1,34 @@
 # Changelog

+## 2.35.0 / 2022-04-21
+
+This Prometheus release is built with go1.18, which contains two noticeable changes related to TLS:
+
+1. [TLS 1.0 and 1.1 disabled by default client-side](https://go.dev/doc/go1.18#tls10).
+   Prometheus users can override this with the `min_version` parameter of [tls_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tls_config).
+2. [Certificates signed with the SHA-1 hash function are rejected](https://go.dev/doc/go1.18#sha1). This doesn't apply to self-signed root certificates.
+
+* [CHANGE] TSDB: Delete `*.tmp` WAL files when Prometheus starts. #10317
+* [CHANGE] promtool: Add new flag `--lint` (enabled by default) for the commands `check rules` and `check config`, resulting in a new exit code (`3`) for linter errors. #10435
+* [FEATURE] Support for automatically setting the variable `GOMAXPROCS` to the container CPU limit. Enable with the flag `--enable-feature=auto-gomaxprocs`. #10498
+* [FEATURE] PromQL: Extend statistics with total and peak number of samples in a query. Additionally, per-step statistics are available with `stats=all` in the query API.
+  Enable with the flag `--enable-feature=promql-per-step-stats`. #10369
+* [ENHANCEMENT] Prometheus is built with Go 1.18. #10501
+* [ENHANCEMENT] TSDB: more efficient sorting of postings read from WAL at startup. #10500
+* [ENHANCEMENT] Azure SD: Add metric to track Azure SD failures. #10476
+* [ENHANCEMENT] Azure SD: Add an optional `resource_group` configuration. #10365
+* [ENHANCEMENT] Kubernetes SD: Support `discovery.k8s.io/v1` `EndpointSlice` (previously only `discovery.k8s.io/v1beta1` `EndpointSlice` was supported). #9570
+* [ENHANCEMENT] Kubernetes SD: Allow attaching node metadata to discovered pods. #10080
+* [ENHANCEMENT] OAuth2: Support for using a proxy URL to fetch OAuth2 tokens. #10492
+* [ENHANCEMENT] Configuration: Add the ability to disable HTTP2. #10492
+* [ENHANCEMENT] Config: Support overriding minimum TLS version. #10610
+* [BUGFIX] Kubernetes SD: Explicitly include gcp auth from k8s.io. #10516
+* [BUGFIX] Fix OpenMetrics parser to sort uppercase labels correctly. #10510
+* [BUGFIX] UI: Fix scrape interval and duration tooltip not showing on target page. #10545
+* [BUGFIX] Tracing/GRPC: Set TLS credentials only when insecure is false. #10592
+* [BUGFIX] Agent: Fix ID collision when loading a WAL with multiple segments. #10587
+* [BUGFIX] Remote-write: Fix a deadlock between Batch and flushing the queue. #10608
+
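The `min_version` escape hatch mentioned in the TLS notes above is set per TLS-using section of the Prometheus configuration. A minimal sketch, assuming a target that still only speaks TLS 1.0 (job name and address are hypothetical):

```yaml
scrape_configs:
  - job_name: legacy-appliance        # hypothetical TLS 1.0-only target
    scheme: https
    static_configs:
      - targets: ["10.0.0.5:9100"]
    tls_config:
      # Restore the pre-go1.18 behaviour for this target only.
      min_version: TLS10
```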
 ## 2.34.0 / 2022-03-15

 * [CHANGE] UI: Classic UI removed. #10208
@@ -12,7 +43,7 @@
 * [ENHANCEMENT] UI: Optimize the alerts page and add a search bar. #10142
 * [ENHANCEMENT] UI: Improve graph colors that were hard to see. #10179
 * [ENHANCEMENT] Config: Allow escaping of `$` with `$$` when using environment variables with external labels. #10129
 * [BUGFIX] PromQL: Properly return an error from histogram_quantile when metrics have the same labelset. #10140
 * [BUGFIX] UI: Fix bug that sets the range input to the resolution. #10227
 * [BUGFIX] TSDB: Fix a query panic when `memory-snapshot-on-shutdown` is enabled. #10348
 * [BUGFIX] Parser: Specify type in metadata parser errors. #10269
@@ -331,7 +362,7 @@ Alertmanager API v2 was released in Alertmanager v0.16.0 (released in January
 2019).

 * [FEATURE] **experimental** API: Accept remote_write requests. Behind the --enable-feature=remote-write-receiver flag. #8424
-* [FEATURE] **experimental** PromQL: Add '@ <timestamp>' modifier. Behind the --enable-feature=promql-at-modifier flag. #8121 #8436 #8425
+* [FEATURE] **experimental** PromQL: Add `@ <timestamp>` modifier. Behind the --enable-feature=promql-at-modifier flag. #8121 #8436 #8425
 * [ENHANCEMENT] Add optional name property to testgroup for better test failure output. #8440
 * [ENHANCEMENT] Add warnings into React Panel on the Graph page. #8427
 * [ENHANCEMENT] TSDB: Increase the number of buckets for the compaction duration metric. #8342
@@ -376,12 +407,12 @@ Alertmanager API v2 was released in Alertmanager v0.16.0 (released in January
 * [CHANGE] UI: Make the React UI default. #8142
 * [CHANGE] Remote write: The following metrics were removed/renamed in remote write. #6815
-  - `prometheus_remote_storage_succeeded_samples_total` was removed and `prometheus_remote_storage_samples_total` was introduced for all the samples attempted to send.
-  - `prometheus_remote_storage_sent_bytes_total` was removed and replaced with `prometheus_remote_storage_samples_bytes_total` and `prometheus_remote_storage_metadata_bytes_total`.
-  - `prometheus_remote_storage_failed_samples_total` -> `prometheus_remote_storage_samples_failed_total`.
-  - `prometheus_remote_storage_retried_samples_total` -> `prometheus_remote_storage_samples_retried_total`.
-  - `prometheus_remote_storage_dropped_samples_total` -> `prometheus_remote_storage_samples_dropped_total`.
-  - `prometheus_remote_storage_pending_samples` -> `prometheus_remote_storage_samples_pending`.
+  * `prometheus_remote_storage_succeeded_samples_total` was removed and `prometheus_remote_storage_samples_total` was introduced for all the samples attempted to send.
+  * `prometheus_remote_storage_sent_bytes_total` was removed and replaced with `prometheus_remote_storage_samples_bytes_total` and `prometheus_remote_storage_metadata_bytes_total`.
+  * `prometheus_remote_storage_failed_samples_total` -> `prometheus_remote_storage_samples_failed_total`.
+  * `prometheus_remote_storage_retried_samples_total` -> `prometheus_remote_storage_samples_retried_total`.
+  * `prometheus_remote_storage_dropped_samples_total` -> `prometheus_remote_storage_samples_dropped_total`.
+  * `prometheus_remote_storage_pending_samples` -> `prometheus_remote_storage_samples_pending`.
 * [CHANGE] Remote: Do not collect non-initialized timestamp metrics. #8060
 * [FEATURE] [EXPERIMENTAL] Remote write: Allow metric metadata to be propagated via remote write. The following new metrics were introduced: `prometheus_remote_storage_metadata_total`, `prometheus_remote_storage_metadata_failed_total`, `prometheus_remote_storage_metadata_retried_total`, `prometheus_remote_storage_metadata_bytes_total`. #6815
 * [ENHANCEMENT] Remote write: Added a metric `prometheus_remote_storage_max_samples_per_send` for remote write. #8102
@@ -557,7 +588,6 @@ This release changes WAL compression from opt-in to default. WAL compression wil
 * [BUGFIX] Remote Write: Fixed blocked resharding edge case. #7122
 * [BUGFIX] Remote Write: Fixed remote write not updating on relabel configs change. #7073
-
 ## 2.17.2 / 2020-04-20

 * [BUGFIX] Federation: Register federation metrics #7081
@@ -973,10 +1003,10 @@ We're rolling back the Dockerfile changes introduced in 2.6.0. If you made chang
 ## 2.4.2 / 2018-09-21

 The last release didn't have bugfix included due to a vendoring error.

 * [BUGFIX] Handle WAL corruptions properly prometheus/tsdb#389
 * [BUGFIX] Handle WAL migrations correctly on Windows prometheus/tsdb#392

 ## 2.4.1 / 2018-09-19
@@ -1123,15 +1153,14 @@ This release includes multiple bugfixes and features. Further, the WAL implement
 * [BUGFIX] tsdb: Cleanup and do not retry failing compactions.
 * [BUGFIX] tsdb: Close WAL while shutting down.

 ## 2.0.0 / 2017-11-08

 This release includes a completely rewritten storage, huge performance
 improvements, but also many backwards incompatible changes. For more
 information, read the announcement blog post and migration guide.
-
-https://prometheus.io/blog/2017/11/08/announcing-prometheus-2-0/
-https://prometheus.io/docs/prometheus/2.0/migration/
+<https://prometheus.io/blog/2017/11/08/announcing-prometheus-2-0/>
+<https://prometheus.io/docs/prometheus/2.0/migration/>

 * [CHANGE] Completely rewritten storage layer, with WAL. This is not backwards compatible with 1.x storage, and many flags have changed/disappeared.
 * [CHANGE] New staleness behavior. Series now marked stale after target scrapes no longer return them, and soon after targets disappear from service discovery.
@@ -1541,7 +1570,7 @@ This release contains multiple breaking changes to the configuration schema.
 This version contains a breaking change to the query language. Please read
 the documentation on the grouping behavior of vector matching:

-https://prometheus.io/docs/querying/operators/#vector-matching
+<https://prometheus.io/docs/querying/operators/#vector-matching>

 * [FEATURE] Add experimental Microsoft Azure service discovery
 * [FEATURE] Add `ignoring` modifier for binary operations
@@ -1667,7 +1696,7 @@ BREAKING CHANGES:
   with InfluxDB 0.9.x. 0.8.x versions of InfluxDB are not supported anymore.
 * Escape sequences in double- and single-quoted string literals in rules or query
   expressions are now interpreted like escape sequences in Go string literals
-  (https://golang.org/ref/spec#String_literals).
+  (<https://golang.org/ref/spec#String_literals>).

 Future breaking changes / deprecated features:
@@ -1803,6 +1832,7 @@ All changes:
 * [CLEANUP] Resolve relative paths during configuration loading.

 ## 0.15.1 / 2015-07-27
+
 * [BUGFIX] Fix vector matching behavior when there is a mix of equality and
   non-equality matchers in a vector selector and one matcher matches no series.
 * [ENHANCEMENT] Allow overriding `GOARCH` and `GOOS` in Makefile.INCLUDE.
@@ -1920,6 +1950,7 @@ All changes:
 * [CLEANUP] Use new v1 HTTP API for querying and graphing.

 ## 0.14.0 / 2015-06-01
+
 * [CHANGE] Configuration format changed and switched to YAML.
   (See the provided [migration tool](https://github.com/prometheus/migrate/releases).)
 * [ENHANCEMENT] Redesign of state-preserving target discovery.
@@ -1945,9 +1976,11 @@ All changes:
 * [ENHANCEMENT] Limit retrievable samples to the storage's retention window.

 ## 0.13.4 / 2015-05-23
+
 * [BUGFIX] Fix a race while checkpointing fingerprint mappings.

 ## 0.13.3 / 2015-05-11
+
 * [BUGFIX] Handle fingerprint collisions properly.
 * [CHANGE] Comments in rules file must start with `#`. (The undocumented `//`
   and `/*...*/` comment styles are no longer supported.)
@@ -1958,6 +1991,7 @@ All changes:
 * [ENHANCEMENT] Terminate running queries during shutdown.

 ## 0.13.2 / 2015-05-05
+
 * [MAINTENANCE] Updated vendored dependencies to their newest versions.
 * [MAINTENANCE] Include rule_checker and console templates in release tarball.
 * [BUGFIX] Sort NaN as the lowest value.
@@ -1968,10 +2002,12 @@ All changes:
 * [BUGFIX] Show correct error on wrong DNS response.

 ## 0.13.1 / 2015-04-09
+
 * [BUGFIX] Treat memory series with zero chunks correctly in series maintenance.
 * [ENHANCEMENT] Improve readability of usage text even more.

 ## 0.13.0 / 2015-04-08
+
 * [ENHANCEMENT] Double-delta encoding for chunks, saving typically 40% of
   space, both in RAM and on disk.
 * [ENHANCEMENT] Redesign of chunk persistence queuing, increasing performance
@@ -1999,6 +2035,7 @@ All changes:
 * [MAINTENANCE] Updated vendored dependencies to their newest versions.

 ## 0.12.0 / 2015-03-04
+
 * [CHANGE] Use client_golang v0.3.1. THIS CHANGES FINGERPRINTING AND INVALIDATES
   ALL PERSISTED FINGERPRINTS. You have to wipe your storage to use this or
   later versions. There is a version guard in place that will prevent you to
@@ -2014,6 +2051,7 @@ All changes:
 * [CHANGE] Makefile uses Go 1.4.2.

 ## 0.11.1 / 2015-02-27
+
 * [BUGFIX] Make series maintenance complete again. (Ever since 0.9.0rc4,
   or commit 0851945, series would not be archived, chunk descriptors would
   not be evicted, and stale head chunks would never be closed. This happened
@@ -2026,6 +2064,7 @@ All changes:
 * [ENHANCEMENT] Limit the number of 'dirty' series counted during checkpointing.

 ## 0.11.0 / 2015-02-23
+
 * [FEATURE] Introduce new metric type Histogram with server-side aggregation.
 * [FEATURE] Add offset operator.
 * [FEATURE] Add floor, ceil and round functions.
@@ -2050,18 +2089,20 @@ All changes:
 * [CLEANUP] Various code cleanups.

 ## 0.10.0 / 2015-01-26
+
 * [CHANGE] More efficient JSON result format in query API. This requires
   up-to-date versions of PromDash and prometheus_cli, too.
 * [ENHANCEMENT] Excluded non-minified Bootstrap assets and the Bootstrap maps
   from embedding into the binary. Those files are only used for debugging,
   and then you can use -web.use-local-assets. By including fewer files, the
   RAM usage during compilation is much more manageable.
-* [ENHANCEMENT] Help link points to https://prometheus.github.io now.
+* [ENHANCEMENT] Help link points to <https://prometheus.github.io> now.
 * [FEATURE] Consoles for haproxy and cloudwatch.
 * [BUGFIX] Several fixes to graphs in consoles.
 * [CLEANUP] Removed a file size check that did not check anything.

 ## 0.9.0 / 2015-01-23
+
 * [CHANGE] Reworked command line flags, now more consistent and taking into
   account needs of the new storage backend (see below).
 * [CHANGE] Metric names are dropped after certain transformations.
@@ -2103,10 +2144,12 @@ All changes:
 * [CLEANUP] Removed dead code.

 ## 0.8.0 / 2014-09-04
+
 * [ENHANCEMENT] Stagger scrapes to spread out load.
 * [BUGFIX] Correctly quote HTTP Accept header.

 ## 0.7.0 / 2014-08-06
+
 * [FEATURE] Added new functions: abs(), topk(), bottomk(), drop_common_labels().
 * [FEATURE] Let console templates get graph links from expressions.
 * [FEATURE] Allow console templates to dynamically include other templates.
@@ -2121,6 +2164,7 @@ All changes:
 * [ENHANCEMENT] Dockerfile also builds Prometheus support tools now.

 ## 0.6.0 / 2014-06-30
+
 * [FEATURE] Added console and alert templates support, along with various template functions.
 * [PERFORMANCE] Much faster and more memory-efficient flushing to disk.
 * [ENHANCEMENT] Query results are now only logged when debugging.

CODE_OF_CONDUCT.md

@@ -1,3 +1,3 @@
-## Prometheus Community Code of Conduct
+# Prometheus Community Code of Conduct

-Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
+Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).

CONTRIBUTING.md

@@ -21,7 +21,6 @@ Prometheus uses GitHub to manage reviews of pull requests.
 * Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works).

-
 ## Steps to Contribute

 Should you wish to work on an issue, please claim it first by commenting on the GitHub issue that you want to work on it. This is to prevent duplicated efforts from contributors on the same issue.
@@ -33,7 +32,8 @@ You can [spin up a prebuilt dev environment](https://gitpod.io/#https://github.c
 For complete instructions on how to compile see: [Building From Source](https://github.com/prometheus/prometheus#building-from-source)

 For quickly compiling and testing your changes do:
-```
+
+```bash
 # For building.
 go build ./cmd/prometheus/
 ./prometheus

MAINTAINERS.md

@@ -1,3 +1,5 @@
+# Maintainers
+
 Julien Pivotto (<roidelapluie@prometheus.io> / @roidelapluie) and Levi Harrison (<levi@leviharrison.dev> / @LeviHarrison) are the main/default maintainers, some parts of the codebase have other maintainers:

 * `cmd`

Makefile

@@ -63,10 +63,15 @@ ui-lint:
 assets: ui-install ui-build

 .PHONY: assets-compress
-assets-compress:
+assets-compress: assets
	@echo '>> compressing assets'
	scripts/compress_assets.sh

+.PHONY: assets-tarball
+assets-tarball: assets
+	@echo '>> packaging assets'
+	scripts/package_assets.sh
+
 .PHONY: test
 # If we only want to only test go code we have to change the test target
 # which is called by all.
@@ -96,7 +101,7 @@ plugins/plugins.go: plugins.yml plugins/generate.go
 plugins: plugins/plugins.go

 .PHONY: build
-build: assets assets-compress common-build plugins
+build: assets npm_licenses assets-compress common-build plugins

 .PHONY: bench_tsdb
 bench_tsdb: $(PROMU)

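A plausible local use of the new target; note that the `.tarballs` output directory is inferred from the `persist_to_workspace` step in the CircleCI change at the top of this commit, not stated in the Makefile itself:

```bash
# Build the web UI and package the static assets for downstream jobs/releases.
make assets-tarball
ls .tarballs/   # the packaged asset tarball is assumed to land here
```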
README.md

@@ -17,18 +17,18 @@ displays the results, and can trigger alerts when specified conditions are obser

 The features that distinguish Prometheus from other metrics and monitoring systems are:

-- A **multi-dimensional** data model (time series defined by metric name and set of key/value dimensions)
-- PromQL, a **powerful and flexible query language** to leverage this dimensionality
-- No dependency on distributed storage; **single server nodes are autonomous**
-- An HTTP **pull model** for time series collection
-- **Pushing time series** is supported via an intermediary gateway for batch jobs
-- Targets are discovered via **service discovery** or **static configuration**
-- Multiple modes of **graphing and dashboarding support**
-- Support for hierarchical and horizontal **federation**
+* A **multi-dimensional** data model (time series defined by metric name and set of key/value dimensions)
+* PromQL, a **powerful and flexible query language** to leverage this dimensionality
+* No dependency on distributed storage; **single server nodes are autonomous**
+* An HTTP **pull model** for time series collection
+* **Pushing time series** is supported via an intermediary gateway for batch jobs
+* Targets are discovered via **service discovery** or **static configuration**
+* Multiple modes of **graphing and dashboarding support**
+* Support for hierarchical and horizontal **federation**

 ## Architecture overview

-![](https://cdn.jsdelivr.net/gh/prometheus/prometheus@c34257d069c630685da35bcef084632ffd5d6209/documentation/images/architecture.svg)
+![Architecture overview](https://cdn.jsdelivr.net/gh/prometheus/prometheus@c34257d069c630685da35bcef084632ffd5d6209/documentation/images/architecture.svg)

 ## Install
@@ -49,13 +49,16 @@ Docker images are available on [Quay.io](https://quay.io/repository/prometheus/p

 You can launch a Prometheus container for trying it out with

-    $ docker run --name prometheus -d -p 127.0.0.1:9090:9090 prom/prometheus
+```bash
+docker run --name prometheus -d -p 127.0.0.1:9090:9090 prom/prometheus
+```

-Prometheus will now be reachable at http://localhost:9090/.
+Prometheus will now be reachable at <http://localhost:9090/>.

 ### Building from source

 To build Prometheus from source code, You need:
+
 * Go [version 1.16 or greater](https://golang.org/doc/install).
 * NodeJS [version 16 or greater](https://nodejs.org/).
 * npm [version 7 or greater](https://www.npmjs.com/).
@@ -63,35 +66,39 @@ To build Prometheus from source code, You need:
 You can directly use the `go` tool to download and install the `prometheus`
 and `promtool` binaries into your `GOPATH`:

-    $ GO111MODULE=on go install github.com/prometheus/prometheus/cmd/...
-    $ prometheus --config.file=your_config.yml
+```bash
+GO111MODULE=on go install github.com/prometheus/prometheus/cmd/...
+prometheus --config.file=your_config.yml
+```

 *However*, when using `go install` to build Prometheus, Prometheus will expect to be able to
 read its web assets from local filesystem directories under `web/ui/static` and
 `web/ui/templates`. In order for these assets to be found, you will have to run Prometheus
 from the root of the cloned repository. Note also that these directories do not include the
-new experimental React UI unless it has been built explicitly using `make assets` or `make build`.
+React UI unless it has been built explicitly using `make assets` or `make build`.

 An example of the above configuration file can be found [here.](https://github.com/prometheus/prometheus/blob/main/documentation/examples/prometheus.yml)

 You can also clone the repository yourself and build using `make build`, which will compile in
 the web assets so that Prometheus can be run from anywhere:

-    $ mkdir -p $GOPATH/src/github.com/prometheus
-    $ cd $GOPATH/src/github.com/prometheus
-    $ git clone https://github.com/prometheus/prometheus.git
-    $ cd prometheus
-    $ make build
-    $ ./prometheus --config.file=your_config.yml
+```bash
+mkdir -p $GOPATH/src/github.com/prometheus
+cd $GOPATH/src/github.com/prometheus
+git clone https://github.com/prometheus/prometheus.git
+cd prometheus
+make build
+./prometheus --config.file=your_config.yml
+```

 The Makefile provides several targets:

 * *build*: build the `prometheus` and `promtool` binaries (includes building and compiling in web assets)
 * *test*: run the tests
 * *test-short*: run the short tests
 * *format*: format the source code
 * *vet*: check the source code for common errors
-* *assets*: build the new experimental React UI
+* *assets*: build the React UI

 ### Service discovery plugins
@@ -115,22 +122,55 @@ always, be extra careful when loading third party code.
 The `make docker` target is designed for use in our CI system.
 You can build a docker image locally with the following commands:

-    $ make promu
-    $ promu crossbuild -p linux/amd64
-    $ make npm_licenses
-    $ make common-docker-amd64
+```bash
+make promu
+promu crossbuild -p linux/amd64
+make npm_licenses
+make common-docker-amd64
+```

 *NB* if you are on a Mac, you will need [gnu-tar](https://formulae.brew.sh/formula/gnu-tar).

+## Using Prometheus as a Go Library
+
+### Remote Write
+
+We are publishing our Remote Write protobuf independently at
+[buf.build](https://buf.build/prometheus/prometheus/assets).
+
+You can use that as a library:
+
+```shell
+go get go.buf.build/protocolbuffers/go/prometheus/prometheus
+```
+
+This is experimental.
+
+### Prometheus code base
+
+In order to comply with [go mod](https://go.dev/ref/mod#versions) rules,
+Prometheus release numbers do not exactly match Go module releases. For the
+Prometheus v2.y.z releases, we are publishing equivalent v0.y.z tags.
+
+Therefore, a user that would want to use Prometheus v2.35.0 as a library could do:
+
+```shell
+go get github.com/prometheus/prometheus@v0.35.0
+```
+
+This solution makes it clear that we might break our internal Go APIs between
+minor user-facing releases, as [breaking changes are allowed in major version
+zero](https://semver.org/#spec-item-4).
+
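As an illustration of consuming the module under this v0.y.z scheme, the sketch below parses a PromQL expression using only the library; it assumes a `go.mod` requiring `github.com/prometheus/prometheus v0.35.0`, and the query string is purely an example:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Parse a PromQL expression without running a Prometheus server.
	expr, err := parser.ParseExpr(`rate(http_requests_total{job="api"}[5m])`)
	if err != nil {
		panic(err)
	}
	fmt.Printf("type=%s expr=%s\n", expr.Type(), expr.String())
}
```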
 ## React UI Development

-For more information on building, running, and developing on the new React-based UI, see the React app's [README.md](web/ui/README.md).
+For more information on building, running, and developing on the React-based UI, see the React app's [README.md](web/ui/README.md).

 ## More information

-* The source code is periodically indexed, but due to an issue with versioning, the "latest" docs shown on Godoc are outdated. Instead, you can use [the docs for v2.31.1](https://pkg.go.dev/github.com/prometheus/prometheus@v1.8.2-0.20211105201321-411021ada9ab).
+* Godoc documentation is available via [pkg.go.dev](https://pkg.go.dev/github.com/prometheus/prometheus). Due to peculiarities of Go Modules, v2.x.y will be displayed as v0.x.y.
 * You will find a CircleCI configuration in [`.circleci/config.yml`](.circleci/config.yml).
 * See the [Community page](https://prometheus.io/community) for how to reach the Prometheus developers and users on various communication channels.

 ## Contributing
@@ -140,7 +180,6 @@ Refer to [CONTRIBUTING.md](https://github.com/prometheus/prometheus/blob/main/CO

 Apache License 2.0, see [LICENSE](https://github.com/prometheus/prometheus/blob/main/LICENSE).

-
 [hub]: https://hub.docker.com/r/prom/prometheus/
 [circleci]: https://circleci.com/gh/prometheus/prometheus
 [quay]: https://quay.io/repository/prometheus/prometheus

RELEASE.md

@@ -41,7 +41,9 @@ Release cadence of first pre-releases being cut is 6 weeks.
 | v2.34 | 2022-02-23 | Chris Marchbanks (GitHub: @csmarchbanks) |
 | v2.35 | 2022-04-06 | Augustin Husson (GitHub: @nexucis) |
 | v2.36 | 2022-05-18 | Matthias Loibl (GitHub: @metalmatze) |
-| v2.37 | 2022-06-29 | **searching for volunteer** |
+| v2.37 LTS | 2022-06-29 | Julien Pivotto (GitHub: @roidelapluie) |
+| v2.38 | 2022-08-10 | **searching for volunteer** |
+| v2.39 | 2022-09-21 | **searching for volunteer** |

 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.

@@ -95,7 +97,7 @@ of these in pull requests, one per feature.

 #### Updating Go dependencies

-```
+```bash
 make update-go-deps
 git add go.mod go.sum
 git commit -m "Update dependencies"

@@ -112,10 +114,10 @@ In case you want to update the UI dependencies, you can run the following comman
 make update-npm-deps
 ```

 Once this step completes, please verify that no additional `node_modules` directory was created in any of the module subdirectories
 (which could indicate conflicting dependency versions across modules). Then run `make ui-build` to verify that the build is still working.

 Note: Once in a while, the npm dependencies should also be updated to their latest release versions (major or minor) with `make upgrade-npm-deps`,
 though this may be done at convenient times (e.g. by the UI maintainers) that are out-of-sync with Prometheus releases.

 ### 1. Prepare your release

@@ -142,9 +144,21 @@ Entries in the `CHANGELOG.md` are meant to be in this order:
 Tag the new release via the following commands:

 ```bash
-$ tag="v$(< VERSION)"
-$ git tag -s "${tag}" -m "${tag}"
-$ git push origin "${tag}"
+tag="v$(< VERSION)"
+git tag -s "${tag}" -m "${tag}"
+git push origin "${tag}"
+```
+
+Go modules versioning requires strict use of semver. Because we do not commit to
+avoid code-level breaking changes for the libraries between minor releases of
+the Prometheus server, we use major version zero releases for the libraries.
+
+Tag the new library release via the following commands:
+
+```bash
+tag="v$(sed s/2/0/ < VERSION)"
+git tag -s "${tag}" -m "${tag}"
+git push origin "${tag}"
 ```

 Optionally, you can use this handy `.gitconfig` alias.

SECURITY.md

@@ -3,4 +3,4 @@
 The Prometheus security policy, including how to report vulnerabilities, can be
 found here:

-https://prometheus.io/docs/operating/security/
+<https://prometheus.io/docs/operating/security/>

VERSION

@@ -1 +1 @@
-2.34.0
+2.35.0

cmd/prometheus/main_test.go

@@ -17,7 +17,7 @@ import (
 	"bytes"
 	"context"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"math"
 	"os"
 	"os/exec"
@@ -211,7 +211,7 @@ func TestWALSegmentSizeBounds(t *testing.T) {
 	stderr, err := prom.StderrPipe()
 	require.NoError(t, err)
 	go func() {
-		slurp, _ := ioutil.ReadAll(stderr)
+		slurp, _ := io.ReadAll(stderr)
 		t.Log(string(slurp))
 	}()
@@ -256,7 +256,7 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
 	stderr, err := prom.StderrPipe()
 	require.NoError(t, err)
 	go func() {
-		slurp, _ := ioutil.ReadAll(stderr)
+		slurp, _ := io.ReadAll(stderr)
 		t.Log(string(slurp))
 	}()
@@ -445,7 +445,7 @@ func TestModeSpecificFlags(t *testing.T) {
 	stderr, err := prom.StderrPipe()
 	require.NoError(t, err)
 	go func() {
-		slurp, _ := ioutil.ReadAll(stderr)
+		slurp, _ := io.ReadAll(stderr)
 		t.Log(string(slurp))
 	}()

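This file and the ones that follow apply the same mechanical migration demanded by the new `io/ioutil` depguard rule above; since Go 1.16 each `ioutil` helper has a drop-in replacement in `io` or `os`. A compact sketch of the mapping (not code from the commit):

```go
package main

import (
	"io"
	"os"
	"strings"
)

func main() {
	// ioutil.ReadAll(r)            -> io.ReadAll(r)
	data, _ := io.ReadAll(strings.NewReader("stderr output"))

	// ioutil.TempFile(dir, pat)    -> os.CreateTemp(dir, pat)
	f, _ := os.CreateTemp("", "query")
	defer os.Remove(f.Name())

	// ioutil.WriteFile(name, b, m) -> os.WriteFile(name, b, m)
	_ = os.WriteFile(f.Name(), data, 0o600)
}
```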
cmd/prometheus/query_log_test.go

@@ -17,7 +17,7 @@ import (
 	"bufio"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net"
 	"net/http"
 	"net/url"
@@ -235,10 +235,10 @@ func (p *queryLogTest) run(t *testing.T) {
 	p.skip(t)

 	// Setup temporary files for this test.
-	queryLogFile, err := ioutil.TempFile("", "query")
+	queryLogFile, err := os.CreateTemp("", "query")
 	require.NoError(t, err)
 	defer os.Remove(queryLogFile.Name())
-	p.configFile, err = ioutil.TempFile("", "config")
+	p.configFile, err = os.CreateTemp("", "config")
 	require.NoError(t, err)
 	defer os.Remove(p.configFile.Name())
@@ -269,7 +269,7 @@ func (p *queryLogTest) run(t *testing.T) {
 	wg.Add(1)
 	defer wg.Wait()
 	go func() {
-		slurp, _ := ioutil.ReadAll(stderr)
+		slurp, _ := io.ReadAll(stderr)
 		t.Log(string(slurp))
 		wg.Done()
 	}()
@@ -333,7 +333,7 @@ func (p *queryLogTest) run(t *testing.T) {
 		return
 	}
 	// Move the file, Prometheus should still write to the old file.
-	newFile, err := ioutil.TempFile("", "newLoc")
+	newFile, err := os.CreateTemp("", "newLoc")
 	require.NoError(t, err)
 	require.NoError(t, newFile.Close())
 	defer os.Remove(newFile.Name())

cmd/promtool/debug.go

@@ -15,7 +15,7 @@ package main

 import (
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"

 	"github.com/pkg/errors"
@@ -41,7 +41,7 @@ func debugWrite(cfg debugWriterConfig) error {
 	if err != nil {
 		return errors.Wrap(err, "error executing HTTP request")
 	}
-	body, err := ioutil.ReadAll(res.Body)
+	body, err := io.ReadAll(res.Body)
 	res.Body.Close()
 	if err != nil {
 		return errors.Wrap(err, "error reading the response body")

cmd/promtool/main.go

@ -19,7 +19,6 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"math" "math"
"net/http" "net/http"
"net/url" "net/url"
@ -66,8 +65,14 @@ const (
failureExitCode = 1 failureExitCode = 1
// Exit code 3 is used for "one or more lint issues detected". // Exit code 3 is used for "one or more lint issues detected".
lintErrExitCode = 3 lintErrExitCode = 3
lintOptionAll = "all"
lintOptionDuplicateRules = "duplicate-rules"
lintOptionNone = "none"
) )
var lintOptions = []string{lintOptionAll, lintOptionDuplicateRules, lintOptionNone}
func main() { func main() {
app := kingpin.New(filepath.Base(os.Args[0]), "Tooling for the Prometheus monitoring system.").UsageWriter(os.Stdout) app := kingpin.New(filepath.Base(os.Args[0]), "Tooling for the Prometheus monitoring system.").UsageWriter(os.Stdout)
app.Version(version.Print("promtool")) app.Version(version.Print("promtool"))
@ -86,6 +91,10 @@ func main() {
"The config files to check.", "The config files to check.",
).Required().ExistingFiles() ).Required().ExistingFiles()
checkConfigSyntaxOnly := checkConfigCmd.Flag("syntax-only", "Only check the config file syntax, ignoring file and content validation referenced in the config").Bool() checkConfigSyntaxOnly := checkConfigCmd.Flag("syntax-only", "Only check the config file syntax, ignoring file and content validation referenced in the config").Bool()
checkConfigLint := checkConfigCmd.Flag(
"lint",
"Linting checks to apply to the rules specified in the config. Available options are: "+strings.Join(lintOptions, ", ")+". Use --lint=none to disable linting",
).Default(lintOptionDuplicateRules).String()
checkWebConfigCmd := checkCmd.Command("web-config", "Check if the web config files are valid or not.") checkWebConfigCmd := checkCmd.Command("web-config", "Check if the web config files are valid or not.")
webConfigFiles := checkWebConfigCmd.Arg( webConfigFiles := checkWebConfigCmd.Arg(
@ -98,6 +107,10 @@ func main() {
"rule-files", "rule-files",
"The rule files to check.", "The rule files to check.",
).Required().ExistingFiles() ).Required().ExistingFiles()
checkRulesLint := checkRulesCmd.Flag(
"lint",
"Linting checks to apply. Available options are: "+strings.Join(lintOptions, ", ")+". Use --lint=none to disable linting",
).Default(lintOptionDuplicateRules).String()
checkMetricsCmd := checkCmd.Command("metrics", checkMetricsUsage) checkMetricsCmd := checkCmd.Command("metrics", checkMetricsUsage)
checkMetricsExtended := checkCmd.Flag("extended", "Print extended information related to the cardinality of the metrics.").Bool() checkMetricsExtended := checkCmd.Flag("extended", "Print extended information related to the cardinality of the metrics.").Bool()
@ -222,13 +235,13 @@ func main() {
os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout)) os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout))
case checkConfigCmd.FullCommand(): case checkConfigCmd.FullCommand():
os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, *configFiles...)) os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint), *configFiles...))
case checkWebConfigCmd.FullCommand(): case checkWebConfigCmd.FullCommand():
os.Exit(CheckWebConfig(*webConfigFiles...)) os.Exit(CheckWebConfig(*webConfigFiles...))
case checkRulesCmd.FullCommand(): case checkRulesCmd.FullCommand():
os.Exit(CheckRules(*ruleFiles...)) os.Exit(CheckRules(newLintConfig(*checkRulesLint), *ruleFiles...))
case checkMetricsCmd.FullCommand(): case checkMetricsCmd.FullCommand():
os.Exit(CheckMetrics(*checkMetricsExtended)) os.Exit(CheckMetrics(*checkMetricsExtended))
@ -283,9 +296,39 @@ func main() {
} }
} }
// nolint:revive
var lintError = fmt.Errorf("lint error")
type lintConfig struct {
all bool
duplicateRules bool
}
func newLintConfig(stringVal string) lintConfig {
items := strings.Split(stringVal, ",")
ls := lintConfig{}
for _, setting := range items {
switch setting {
case lintOptionAll:
ls.all = true
case lintOptionDuplicateRules:
ls.duplicateRules = true
case lintOptionNone:
default:
fmt.Printf("WARNING: unknown lint option %s\n", setting)
}
}
return ls
}
func (ls lintConfig) lintDuplicateRules() bool {
return ls.all || ls.duplicateRules
}
// CheckConfig validates configuration files. // CheckConfig validates configuration files.
func CheckConfig(agentMode, checkSyntaxOnly bool, files ...string) int { func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files ...string) int {
failed := false failed := false
hasLintErrors := false
for _, f := range files { for _, f := range files {
ruleFiles, err := checkConfig(agentMode, f, checkSyntaxOnly) ruleFiles, err := checkConfig(agentMode, f, checkSyntaxOnly)
@ -301,18 +344,24 @@ func CheckConfig(agentMode, checkSyntaxOnly bool, files ...string) int {
fmt.Println() fmt.Println()
for _, rf := range ruleFiles { for _, rf := range ruleFiles {
if n, errs := checkRules(rf); len(errs) > 0 { if n, errs := checkRules(rf, lintSettings); len(errs) > 0 {
fmt.Fprintln(os.Stderr, " FAILED:") fmt.Fprintln(os.Stderr, " FAILED:")
for _, err := range errs { for _, err := range errs {
fmt.Fprintln(os.Stderr, " ", err) fmt.Fprintln(os.Stderr, " ", err)
} }
failed = true failed = true
for _, err := range errs {
hasLintErrors = hasLintErrors || errors.Is(err, lintError)
}
} else { } else {
fmt.Printf(" SUCCESS: %d rules found\n", n) fmt.Printf(" SUCCESS: %d rules found\n", n)
} }
fmt.Println() fmt.Println()
} }
} }
if failed && hasLintErrors {
return lintErrExitCode
}
if failed { if failed {
return failureExitCode return failureExitCode
} }
@ -491,7 +540,7 @@ func checkSDFile(filename string) ([]*targetgroup.Group, error) {
} }
defer fd.Close() defer fd.Close()
content, err := ioutil.ReadAll(fd) content, err := io.ReadAll(fd)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -521,28 +570,35 @@ func checkSDFile(filename string) ([]*targetgroup.Group, error) {
} }
// CheckRules validates rule files. // CheckRules validates rule files.
func CheckRules(files ...string) int { func CheckRules(ls lintConfig, files ...string) int {
failed := false failed := false
hasLintErrors := false
for _, f := range files { for _, f := range files {
if n, errs := checkRules(f); errs != nil { if n, errs := checkRules(f, ls); errs != nil {
fmt.Fprintln(os.Stderr, " FAILED:") fmt.Fprintln(os.Stderr, " FAILED:")
for _, e := range errs { for _, e := range errs {
fmt.Fprintln(os.Stderr, e.Error()) fmt.Fprintln(os.Stderr, e.Error())
} }
failed = true failed = true
for _, err := range errs {
hasLintErrors = hasLintErrors || errors.Is(err, lintError)
}
} else { } else {
fmt.Printf(" SUCCESS: %d rules found\n", n) fmt.Printf(" SUCCESS: %d rules found\n", n)
} }
fmt.Println() fmt.Println()
} }
if failed && hasLintErrors {
return lintErrExitCode
}
if failed { if failed {
return failureExitCode return failureExitCode
} }
return successExitCode return successExitCode
} }
func checkRules(filename string) (int, []error) { func checkRules(filename string, lintSettings lintConfig) (int, []error) {
fmt.Println("Checking", filename) fmt.Println("Checking", filename)
rgs, errs := rulefmt.ParseFile(filename) rgs, errs := rulefmt.ParseFile(filename)
@ -555,16 +611,19 @@ func checkRules(filename string) (int, []error) {
numRules += len(rg.Rules) numRules += len(rg.Rules)
} }
dRules := checkDuplicates(rgs.Groups) if lintSettings.lintDuplicateRules() {
if len(dRules) != 0 { dRules := checkDuplicates(rgs.Groups)
fmt.Printf("%d duplicate rule(s) found.\n", len(dRules)) if len(dRules) != 0 {
for _, n := range dRules { errMessage := fmt.Sprintf("%d duplicate rule(s) found.\n", len(dRules))
fmt.Printf("Metric: %s\nLabel(s):\n", n.metric) for _, n := range dRules {
for _, l := range n.label { errMessage += fmt.Sprintf("Metric: %s\nLabel(s):\n", n.metric)
fmt.Printf("\t%s: %s\n", l.Name, l.Value) for _, l := range n.label {
errMessage += fmt.Sprintf("\t%s: %s\n", l.Name, l.Value)
}
} }
errMessage += "Might cause inconsistency while recording expressions"
return 0, []error{fmt.Errorf("%w %s", lintError, errMessage)}
} }
fmt.Println("Might cause inconsistency while recording expressions.")
} }
return numRules, nil return numRules, nil
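With duplicate-rule linting enabled, the error assembled above (wrapped around the "lint error" sentinel) prints along these lines — the metric and label names here are made up for illustration:

```
 FAILED:
lint error 1 duplicate rule(s) found.
Metric: job:request_errors:rate5m
Label(s):
	severity: page
Might cause inconsistency while recording expressions
```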

View file

@ -15,8 +15,8 @@ package main
import ( import (
"context" "context"
"io/ioutil"
"math" "math"
"os"
"path/filepath" "path/filepath"
"testing" "testing"
"time" "time"
@ -189,7 +189,7 @@ func createSingleRuleTestFiles(path string) error {
labels: labels:
testlabel11: testlabelvalue11 testlabel11: testlabelvalue11
` `
return ioutil.WriteFile(path, []byte(recordingRules), 0o777) return os.WriteFile(path, []byte(recordingRules), 0o777)
} }
func createMultiRuleTestFiles(path string) error { func createMultiRuleTestFiles(path string) error {
@ -209,7 +209,7 @@ func createMultiRuleTestFiles(path string) error {
labels: labels:
testlabel11: testlabelvalue13 testlabel11: testlabelvalue13
` `
return ioutil.WriteFile(path, []byte(recordingRules), 0o777) return os.WriteFile(path, []byte(recordingRules), 0o777)
} }
// TestBackfillLabels confirms that the labels in the rule file override the labels from the metrics // TestBackfillLabels confirms that the labels in the rule file override the labels from the metrics
@ -237,7 +237,7 @@ func TestBackfillLabels(t *testing.T) {
labels: labels:
name1: value-from-rule name1: value-from-rule
` `
require.NoError(t, ioutil.WriteFile(path, []byte(recordingRules), 0o777)) require.NoError(t, os.WriteFile(path, []byte(recordingRules), 0o777))
errs := ruleImporter.loadGroups(ctx, []string{path}) errs := ruleImporter.loadGroups(ctx, []string{path})
for _, err := range errs { for _, err := range errs {
require.NoError(t, err) require.NoError(t, err)

View file

@ -16,7 +16,6 @@ package main
import ( import (
"context" "context"
"fmt" "fmt"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"reflect" "reflect"
@ -64,7 +63,7 @@ func RulesUnitTest(queryOpts promql.LazyLoaderOpts, files ...string) int {
func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts) []error { func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts) []error {
fmt.Println("Unit Testing: ", filename) fmt.Println("Unit Testing: ", filename)
b, err := ioutil.ReadFile(filename) b, err := os.ReadFile(filename)
if err != nil { if err != nil {
return []error{err} return []error{err}
} }

View file

@ -15,7 +15,6 @@ package config
import ( import (
"fmt" "fmt"
"io/ioutil"
"net/url" "net/url"
"os" "os"
"path/filepath" "path/filepath"
@ -103,7 +102,7 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro
// LoadFile parses the given YAML file into a Config. // LoadFile parses the given YAML file into a Config.
func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.Logger) (*Config, error) { func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.Logger) (*Config, error) {
content, err := ioutil.ReadFile(filename) content, err := os.ReadFile(filename)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -14,8 +14,8 @@
package config package config
import ( import (
"crypto/tls"
"encoding/json" "encoding/json"
"io/ioutil"
"net/url" "net/url"
"os" "os"
"path/filepath" "path/filepath"
@ -179,6 +179,9 @@ var expectedConf = &Config{
}, },
FollowRedirects: true, FollowRedirects: true,
EnableHTTP2: true, EnableHTTP2: true,
TLSConfig: config.TLSConfig{
MinVersion: config.TLSVersion(tls.VersionTLS10),
},
}, },
ServiceDiscoveryConfigs: discovery.Configs{ ServiceDiscoveryConfigs: discovery.Configs{
@ -855,6 +858,17 @@ var expectedConf = &Config{
Scheme: DefaultScrapeConfig.Scheme, Scheme: DefaultScrapeConfig.Scheme,
HTTPClientConfig: config.DefaultHTTPClientConfig, HTTPClientConfig: config.DefaultHTTPClientConfig,
RelabelConfigs: []*relabel.Config{
{
Action: relabel.Uppercase,
Regex: relabel.DefaultRelabelConfig.Regex,
Replacement: relabel.DefaultRelabelConfig.Replacement,
Separator: relabel.DefaultRelabelConfig.Separator,
SourceLabels: model.LabelNames{"instance"},
TargetLabel: "instance",
},
},
ServiceDiscoveryConfigs: discovery.Configs{ ServiceDiscoveryConfigs: discovery.Configs{
&hetzner.SDConfig{ &hetzner.SDConfig{
HTTPClientConfig: config.HTTPClientConfig{ HTTPClientConfig: config.HTTPClientConfig{
@ -1194,6 +1208,30 @@ var expectedErrors = []struct {
filename: "labelmap.bad.yml", filename: "labelmap.bad.yml",
errMsg: "\"l-$1\" is invalid 'replacement' for labelmap action", errMsg: "\"l-$1\" is invalid 'replacement' for labelmap action",
}, },
{
filename: "lowercase.bad.yml",
errMsg: "relabel configuration for lowercase action requires 'target_label' value",
},
{
filename: "lowercase2.bad.yml",
errMsg: "\"42lab\" is invalid 'target_label' for lowercase action",
},
{
filename: "lowercase3.bad.yml",
errMsg: "'replacement' can not be set for lowercase action",
},
{
filename: "uppercase.bad.yml",
errMsg: "relabel configuration for uppercase action requires 'target_label' value",
},
{
filename: "uppercase2.bad.yml",
errMsg: "\"42lab\" is invalid 'target_label' for uppercase action",
},
{
filename: "uppercase3.bad.yml",
errMsg: "'replacement' can not be set for uppercase action",
},
{ {
filename: "rules.bad.yml", filename: "rules.bad.yml",
errMsg: "invalid rule file path", errMsg: "invalid rule file path",
@ -1506,7 +1544,7 @@ func TestBadConfigs(t *testing.T) {
} }
func TestBadStaticConfigsJSON(t *testing.T) { func TestBadStaticConfigsJSON(t *testing.T) {
content, err := ioutil.ReadFile("testdata/static_config.bad.json") content, err := os.ReadFile("testdata/static_config.bad.json")
require.NoError(t, err) require.NoError(t, err)
var tg targetgroup.Group var tg targetgroup.Group
err = json.Unmarshal(content, &tg) err = json.Unmarshal(content, &tg)
@ -1514,7 +1552,7 @@ func TestBadStaticConfigsJSON(t *testing.T) {
} }
func TestBadStaticConfigsYML(t *testing.T) { func TestBadStaticConfigsYML(t *testing.T) {
content, err := ioutil.ReadFile("testdata/static_config.bad.yml") content, err := os.ReadFile("testdata/static_config.bad.yml")
require.NoError(t, err) require.NoError(t, err)
var tg targetgroup.Group var tg targetgroup.Group
err = yaml.UnmarshalStrict(content, &tg) err = yaml.UnmarshalStrict(content, &tg)

View file

@ -91,6 +91,9 @@ scrape_configs:
authorization: authorization:
credentials_file: valid_token_file credentials_file: valid_token_file
tls_config:
min_version: TLS10
- job_name: service-x - job_name: service-x
basic_auth: basic_auth:
@ -325,6 +328,10 @@ scrape_configs:
key_file: valid_key_file key_file: valid_key_file
- job_name: hetzner - job_name: hetzner
relabel_configs:
- action: uppercase
source_labels: [instance]
target_label: instance
hetzner_sd_configs: hetzner_sd_configs:
- role: hcloud - role: hcloud
authorization: authorization:

5
config/testdata/lowercase.bad.yml vendored Normal file
View file

@ -0,0 +1,5 @@
scrape_configs:
- job_name: prometheus
relabel_configs:
- action: lowercase
source_labels: [__name__]

6
config/testdata/lowercase2.bad.yml vendored Normal file
View file

@ -0,0 +1,6 @@
scrape_configs:
- job_name: prometheus
relabel_configs:
- action: lowercase
source_labels: [__name__]
target_label: 42lab

7
config/testdata/lowercase3.bad.yml vendored Normal file
View file

@ -0,0 +1,7 @@
scrape_configs:
- job_name: prometheus
relabel_configs:
- action: lowercase
source_labels: [__name__]
target_label: __name__
replacement: bar

5
config/testdata/uppercase.bad.yml vendored Normal file
View file

@ -0,0 +1,5 @@
scrape_configs:
- job_name: prometheus
relabel_configs:
- action: uppercase
source_labels: [__name__]

6
config/testdata/uppercase2.bad.yml vendored Normal file
View file

@ -0,0 +1,6 @@
scrape_configs:
- job_name: prometheus
relabel_configs:
- action: uppercase
source_labels: [__name__]
target_label: 42lab

7
config/testdata/uppercase3.bad.yml vendored Normal file
View file

@ -0,0 +1,7 @@
scrape_configs:
- job_name: prometheus
relabel_configs:
- action: uppercase
source_labels: [__name__]
target_label: __name__
replacement: bar

View file

@ -18,7 +18,6 @@ import (
"encoding/xml" "encoding/xml"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -95,7 +94,7 @@ func fetchApps(ctx context.Context, server string, client *http.Client) (*Applic
return nil, err return nil, err
} }
defer func() { defer func() {
io.Copy(ioutil.Discard, resp.Body) io.Copy(io.Discard, resp.Body)
resp.Body.Close() resp.Body.Close()
}() }()

View file

@ -17,7 +17,7 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil" "io"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
@ -376,7 +376,7 @@ func (d *Discovery) readFile(filename string) ([]*targetgroup.Group, error) {
} }
defer fd.Close() defer fd.Close()
content, err := ioutil.ReadAll(fd) content, err := io.ReadAll(fd)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -17,7 +17,6 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"io" "io"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"sort" "sort"
@ -73,7 +72,7 @@ func (t *testRunner) copyFile(src string) string {
func (t *testRunner) copyFileTo(src, name string) string { func (t *testRunner) copyFileTo(src, name string) string {
t.Helper() t.Helper()
newf, err := ioutil.TempFile(t.dir, "") newf, err := os.CreateTemp(t.dir, "")
require.NoError(t, err) require.NoError(t, err)
f, err := os.Open(src) f, err := os.Open(src)
@ -95,7 +94,7 @@ func (t *testRunner) copyFileTo(src, name string) string {
func (t *testRunner) writeString(file, data string) { func (t *testRunner) writeString(file, data string) {
t.Helper() t.Helper()
newf, err := ioutil.TempFile(t.dir, "") newf, err := os.CreateTemp(t.dir, "")
require.NoError(t, err) require.NoError(t, err)
_, err = newf.WriteString(data) _, err = newf.WriteString(data)

View file

@ -18,7 +18,6 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net" "net"
"net/http" "net/http"
"strconv" "strconv"
@ -85,7 +84,7 @@ func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, err
} }
defer func() { defer func() {
io.Copy(ioutil.Discard, resp.Body) io.Copy(io.Discard, resp.Body)
resp.Body.Close() resp.Body.Close()
}() }()
@ -93,7 +92,7 @@ func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, err
return nil, errors.Errorf("non 2xx status '%d' response during hetzner service discovery with role robot", resp.StatusCode) return nil, errors.Errorf("non 2xx status '%d' response during hetzner service discovery with role robot", resp.StatusCode)
} }
b, err := ioutil.ReadAll(resp.Body) b, err := io.ReadAll(resp.Body)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -18,7 +18,6 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"strconv" "strconv"
@ -157,7 +156,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
return nil, err return nil, err
} }
defer func() { defer func() {
io.Copy(ioutil.Discard, resp.Body) io.Copy(io.Discard, resp.Body)
resp.Body.Close() resp.Body.Close()
}() }()
@ -171,7 +170,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
return nil, errors.Errorf("unsupported content type %q", resp.Header.Get("Content-Type")) return nil, errors.Errorf("unsupported content type %q", resp.Header.Get("Content-Type"))
} }
b, err := ioutil.ReadAll(resp.Body) b, err := io.ReadAll(resp.Body)
if err != nil { if err != nil {
failuresCount.Inc() failuresCount.Inc()
return nil, err return nil, err

View file

@ -16,7 +16,7 @@ package kubernetes
import ( import (
"context" "context"
"fmt" "fmt"
"io/ioutil" "os"
"reflect" "reflect"
"strings" "strings"
"sync" "sync"
@ -312,7 +312,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
} }
if conf.NamespaceDiscovery.IncludeOwnNamespace { if conf.NamespaceDiscovery.IncludeOwnNamespace {
ownNamespaceContents, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") ownNamespaceContents, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
if err != nil { if err != nil {
return nil, fmt.Errorf("could not determine the pod's namespace: %w", err) return nil, fmt.Errorf("could not determine the pod's namespace: %w", err)
} }

View file

@ -18,10 +18,10 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"math/rand" "math/rand"
"net" "net"
"net/http" "net/http"
"os"
"strconv" "strconv"
"strings" "strings"
"time" "time"
@ -186,7 +186,7 @@ type authTokenFileRoundTripper struct {
// newAuthTokenFileRoundTripper adds the auth token read from the file to a request. // newAuthTokenFileRoundTripper adds the auth token read from the file to a request.
func newAuthTokenFileRoundTripper(tokenFile string, rt http.RoundTripper) (http.RoundTripper, error) { func newAuthTokenFileRoundTripper(tokenFile string, rt http.RoundTripper) (http.RoundTripper, error) {
// fail-fast if we can't read the file. // fail-fast if we can't read the file.
_, err := ioutil.ReadFile(tokenFile) _, err := os.ReadFile(tokenFile)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "unable to read auth token file %s", tokenFile) return nil, errors.Wrapf(err, "unable to read auth token file %s", tokenFile)
} }
@ -194,7 +194,7 @@ func newAuthTokenFileRoundTripper(tokenFile string, rt http.RoundTripper) (http.
} }
func (rt *authTokenFileRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) { func (rt *authTokenFileRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) {
b, err := ioutil.ReadFile(rt.authTokenFile) b, err := os.ReadFile(rt.authTokenFile)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "unable to read auth token file %s", rt.authTokenFile) return nil, errors.Wrapf(err, "unable to read auth token file %s", rt.authTokenFile)
} }
@ -331,7 +331,7 @@ func fetchApps(ctx context.Context, client *http.Client, url string) (*appList,
return nil, err return nil, err
} }
defer func() { defer func() {
io.Copy(ioutil.Discard, resp.Body) io.Copy(io.Discard, resp.Body)
resp.Body.Close() resp.Body.Close()
}() }()
@ -339,7 +339,7 @@ func fetchApps(ctx context.Context, client *http.Client, url string) (*appList,
return nil, errors.Errorf("non 2xx status '%v' response during marathon service discovery", resp.StatusCode) return nil, errors.Errorf("non 2xx status '%v' response during marathon service discovery", resp.StatusCode)
} }
b, err := ioutil.ReadAll(resp.Body) b, err := io.ReadAll(resp.Body)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -16,9 +16,9 @@ package moby
import ( import (
"crypto/sha1" "crypto/sha1"
"encoding/base64" "encoding/base64"
"io/ioutil"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"os"
"path/filepath" "path/filepath"
"strings" "strings"
"testing" "testing"
@ -63,7 +63,7 @@ func (m *SDMock) Setup() {
// HandleNodesList mocks nodes list. // HandleNodesList mocks nodes list.
func (m *SDMock) SetupHandlers() { func (m *SDMock) SetupHandlers() {
headers := make(map[string]string) headers := make(map[string]string)
rawHeaders, err := ioutil.ReadFile(filepath.Join("testdata", m.directory, "headers.yml")) rawHeaders, err := os.ReadFile(filepath.Join("testdata", m.directory, "headers.yml"))
require.NoError(m.t, err) require.NoError(m.t, err)
yaml.Unmarshal(rawHeaders, &headers) yaml.Unmarshal(rawHeaders, &headers)
@ -102,13 +102,13 @@ func (m *SDMock) SetupHandlers() {
f += "__" + base64.URLEncoding.EncodeToString(h.Sum(nil))[:10] f += "__" + base64.URLEncoding.EncodeToString(h.Sum(nil))[:10]
} }
} }
if response, err := ioutil.ReadFile(filepath.Join("testdata", m.directory, f+".json")); err == nil { if response, err := os.ReadFile(filepath.Join("testdata", m.directory, f+".json")); err == nil {
w.Header().Add("content-type", "application/json") w.Header().Add("content-type", "application/json")
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
w.Write(response) w.Write(response)
return return
} }
if response, err := ioutil.ReadFile(filepath.Join("testdata", m.directory, f)); err == nil { if response, err := os.ReadFile(filepath.Join("testdata", m.directory, f)); err == nil {
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
w.Write(response) w.Write(response)
return return

View file

@ -26,6 +26,17 @@
"alias": "/cgi-bin", "alias": "/cgi-bin",
"path": "/var/www/cgi-bin" "path": "/var/www/cgi-bin"
} }
],
"port": 22,
"pi": 3.141592653589793,
"buckets": [
0,
2,
5
],
"coordinates": [
60.13464726551357,
-2.0513768021728893
] ]
}, },
"resource": "49af83866dc5a1518968b68e58a25319107afe11", "resource": "49af83866dc5a1518968b68e58a25319107afe11",

View file

@ -19,7 +19,6 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net" "net"
"net/http" "net/http"
"net/url" "net/url"
@ -187,7 +186,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
return nil, err return nil, err
} }
defer func() { defer func() {
io.Copy(ioutil.Discard, resp.Body) io.Copy(io.Discard, resp.Body)
resp.Body.Close() resp.Body.Close()
}() }()
@ -199,7 +198,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
return nil, errors.Errorf("unsupported content type %s", resp.Header.Get("Content-Type")) return nil, errors.Errorf("unsupported content type %s", resp.Header.Get("Content-Type"))
} }
b, err := ioutil.ReadAll(resp.Body) b, err := io.ReadAll(resp.Body)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -137,10 +137,14 @@ func TestPuppetDBRefreshWithParameters(t *testing.T) {
model.LabelName("__meta_puppetdb_file"): model.LabelValue("/etc/puppetlabs/code/environments/prod/modules/upstream/apache/manifests/init.pp"), model.LabelName("__meta_puppetdb_file"): model.LabelValue("/etc/puppetlabs/code/environments/prod/modules/upstream/apache/manifests/init.pp"),
model.LabelName("__meta_puppetdb_parameter_access_log"): model.LabelValue("true"), model.LabelName("__meta_puppetdb_parameter_access_log"): model.LabelValue("true"),
model.LabelName("__meta_puppetdb_parameter_access_log_file"): model.LabelValue("ssl_access_log"), model.LabelName("__meta_puppetdb_parameter_access_log_file"): model.LabelValue("ssl_access_log"),
model.LabelName("__meta_puppetdb_parameter_buckets"): model.LabelValue("0,2,5"),
model.LabelName("__meta_puppetdb_parameter_coordinates"): model.LabelValue("60.13464726551357,-2.0513768021728893"),
model.LabelName("__meta_puppetdb_parameter_docroot"): model.LabelValue("/var/www/html"), model.LabelName("__meta_puppetdb_parameter_docroot"): model.LabelValue("/var/www/html"),
model.LabelName("__meta_puppetdb_parameter_ensure"): model.LabelValue("absent"), model.LabelName("__meta_puppetdb_parameter_ensure"): model.LabelValue("absent"),
model.LabelName("__meta_puppetdb_parameter_labels_alias"): model.LabelValue("edinburgh"), model.LabelName("__meta_puppetdb_parameter_labels_alias"): model.LabelValue("edinburgh"),
model.LabelName("__meta_puppetdb_parameter_options"): model.LabelValue("Indexes,FollowSymLinks,MultiViews"), model.LabelName("__meta_puppetdb_parameter_options"): model.LabelValue("Indexes,FollowSymLinks,MultiViews"),
model.LabelName("__meta_puppetdb_parameter_pi"): model.LabelValue("3.141592653589793"),
model.LabelName("__meta_puppetdb_parameter_port"): model.LabelValue("22"),
model.LabelName("__meta_puppetdb_resource"): model.LabelValue("49af83866dc5a1518968b68e58a25319107afe11"), model.LabelName("__meta_puppetdb_resource"): model.LabelValue("49af83866dc5a1518968b68e58a25319107afe11"),
model.LabelName("__meta_puppetdb_tags"): model.LabelValue(",roles::hypervisor,apache,apache::vhost,class,default-ssl,profile_hypervisor,vhost,profile_apache,hypervisor,__node_regexp__edinburgh,roles,node,"), model.LabelName("__meta_puppetdb_tags"): model.LabelValue(",roles::hypervisor,apache,apache::vhost,class,default-ssl,profile_hypervisor,vhost,profile_apache,hypervisor,__node_regexp__edinburgh,roles,node,"),
model.LabelName("__meta_puppetdb_title"): model.LabelValue("default-ssl"), model.LabelName("__meta_puppetdb_title"): model.LabelValue("default-ssl"),

View file

@ -46,6 +46,10 @@ func (p *Parameters) toLabels() model.LabelSet {
labelValue = value labelValue = value
case bool: case bool:
labelValue = strconv.FormatBool(value) labelValue = strconv.FormatBool(value)
case int64:
labelValue = strconv.FormatInt(value, 10)
case float64:
labelValue = strconv.FormatFloat(value, 'g', -1, 64)
case []string: case []string:
labelValue = separator + strings.Join(value, separator) + separator labelValue = separator + strings.Join(value, separator) + separator
case []interface{}: case []interface{}:
@ -59,6 +63,10 @@ func (p *Parameters) toLabels() model.LabelSet {
values[i] = value values[i] = value
case bool: case bool:
values[i] = strconv.FormatBool(value) values[i] = strconv.FormatBool(value)
case int64:
values[i] = strconv.FormatInt(value, 10)
case float64:
values[i] = strconv.FormatFloat(value, 'g', -1, 64)
case []string: case []string:
values[i] = separator + strings.Join(value, separator) + separator values[i] = separator + strings.Join(value, separator) + separator
} }
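The new numeric cases use Go's shortest round-trip float formatting, which is exactly what the updated PuppetDB test fixtures above expect for the `pi` and `port` parameters. A quick standalone check:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Matches __meta_puppetdb_parameter_port and ..._pi in the test above.
	fmt.Println(strconv.FormatInt(22, 10))                           // "22"
	fmt.Println(strconv.FormatFloat(3.141592653589793, 'g', -1, 64)) // "3.141592653589793"
	// 'g' with precision -1 uses the smallest number of digits that
	// round-trips the float64 exactly.
}
```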

View file

@ -16,9 +16,9 @@ package scaleway
import ( import (
"context" "context"
"fmt" "fmt"
"io/ioutil"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"os"
"testing" "testing"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -127,7 +127,7 @@ func mockScalewayInstance(w http.ResponseWriter, r *http.Request) {
return return
} }
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
instance, err := ioutil.ReadFile("testdata/instance.json") instance, err := os.ReadFile("testdata/instance.json")
if err != nil { if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)
return return

View file

@ -15,8 +15,8 @@ package scaleway
import ( import (
"context" "context"
"io/ioutil"
"net/http" "net/http"
"os"
"strings" "strings"
"time" "time"
@ -226,7 +226,7 @@ type authTokenFileRoundTripper struct {
// newAuthTokenFileRoundTripper adds the auth token read from the file to a request. // newAuthTokenFileRoundTripper adds the auth token read from the file to a request.
func newAuthTokenFileRoundTripper(tokenFile string, rt http.RoundTripper) (http.RoundTripper, error) { func newAuthTokenFileRoundTripper(tokenFile string, rt http.RoundTripper) (http.RoundTripper, error) {
// fail-fast if we can't read the file. // fail-fast if we can't read the file.
_, err := ioutil.ReadFile(tokenFile) _, err := os.ReadFile(tokenFile)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "unable to read auth token file %s", tokenFile) return nil, errors.Wrapf(err, "unable to read auth token file %s", tokenFile)
} }
@ -234,7 +234,7 @@ func newAuthTokenFileRoundTripper(tokenFile string, rt http.RoundTripper) (http.
} }
func (rt *authTokenFileRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) { func (rt *authTokenFileRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) {
b, err := ioutil.ReadFile(rt.authTokenFile) b, err := os.ReadFile(rt.authTokenFile)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "unable to read auth token file %s", rt.authTokenFile) return nil, errors.Wrapf(err, "unable to read auth token file %s", rt.authTokenFile)
} }

View file

@ -18,7 +18,6 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"strings" "strings"
@ -207,11 +206,11 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
} }
defer func() { defer func() {
io.Copy(ioutil.Discard, resp.Body) io.Copy(io.Discard, resp.Body)
resp.Body.Close() resp.Body.Close()
}() }()
data, err := ioutil.ReadAll(resp.Body) data, err := io.ReadAll(resp.Body)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "an error occurred when reading the response body") return nil, errors.Wrap(err, "an error occurred when reading the response body")
} }

View file

@ -19,7 +19,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"path" "path"
@ -195,7 +194,7 @@ func (rc *HTTPResourceClient) Fetch(ctx context.Context) (*v3.DiscoveryResponse,
return nil, err return nil, err
} }
defer func() { defer func() {
io.Copy(ioutil.Discard, resp.Body) io.Copy(io.Discard, resp.Body)
resp.Body.Close() resp.Body.Close()
}() }()
@ -208,7 +207,7 @@ func (rc *HTTPResourceClient) Fetch(ctx context.Context) (*v3.DiscoveryResponse,
return nil, fmt.Errorf("non 200 status '%d' response during xDS fetch", resp.StatusCode) return nil, fmt.Errorf("non 200 status '%d' response during xDS fetch", resp.StatusCode)
} }
respBody, err := ioutil.ReadAll(resp.Body) respBody, err := io.ReadAll(resp.Body)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -16,7 +16,6 @@ package xds
import ( import (
"context" "context"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"testing" "testing"
@ -60,9 +59,9 @@ func createTestHTTPServer(t *testing.T, responder discoveryResponder) *httptest.
require.Equal(t, "application/json", r.Header.Get("Content-Type")) require.Equal(t, "application/json", r.Header.Get("Content-Type"))
require.Equal(t, "application/json", r.Header.Get("Accept")) require.Equal(t, "application/json", r.Header.Get("Accept"))
body, err := ioutil.ReadAll(r.Body) body, err := io.ReadAll(r.Body)
defer func() { defer func() {
_, _ = io.Copy(ioutil.Discard, r.Body) _, _ = io.Copy(io.Discard, r.Body)
_ = r.Body.Close() _ = r.Body.Close()
}() }()
require.NotEmpty(t, body) require.NotEmpty(t, body)

View file

@ -99,7 +99,7 @@ remote_read:
# Storage related settings that are runtime reloadable. # Storage related settings that are runtime reloadable.
storage: storage:
[ - <exemplars> ... ] [ exemplars: <exemplars> ]
# Configures exporting traces. # Configures exporting traces.
tracing: tracing:
@ -369,6 +369,12 @@ A `tls_config` allows configuring TLS connections.
# Disable validation of the server certificate. # Disable validation of the server certificate.
[ insecure_skip_verify: <boolean> ] [ insecure_skip_verify: <boolean> ]
# Minimum acceptable TLS version. Accepted values: TLS10 (TLS 1.0), TLS11 (TLS
# 1.1), TLS12 (TLS 1.2), TLS13 (TLS 1.3).
# If unset, Prometheus will use the Go default minimum version, which is TLS 1.2.
# See MinVersion in https://pkg.go.dev/crypto/tls#Config.
[ min_version: <string> ]
``` ```
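On the Go side, these YAML values map onto crypto/tls version constants via the common config types, as the updated config test earlier in this diff shows. A minimal sketch, assuming github.com/prometheus/common/config as imported in that test:

```go
package main

import (
	"crypto/tls"
	"fmt"

	"github.com/prometheus/common/config"
)

func main() {
	// The YAML value "TLS10" decodes to tls.VersionTLS10 under the hood.
	tlsCfg := config.TLSConfig{MinVersion: config.TLSVersion(tls.VersionTLS10)}
	fmt.Println(uint16(tlsCfg.MinVersion) == tls.VersionTLS10) // true
	// Leaving min_version unset keeps the Go default noted in the
	// doc comment above (TLS 1.2).
}
```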
### `<oauth2>` ### `<oauth2>`
@ -2530,6 +2536,8 @@ anchored on both ends. To un-anchor the regex, use `.*<regex>.*`.
`target_label` to `replacement`, with match group references `target_label` to `replacement`, with match group references
(`${1}`, `${2}`, ...) in `replacement` substituted by their value. If `regex` (`${1}`, `${2}`, ...) in `replacement` substituted by their value. If `regex`
does not match, no replacement takes place. does not match, no replacement takes place.
* `lowercase`: Maps the concatenated `source_labels` to their lower case.
* `uppercase`: Maps the concatenated `source_labels` to their upper case (see the sketch after this list).
* `keep`: Drop targets for which `regex` does not match the concatenated `source_labels`. * `keep`: Drop targets for which `regex` does not match the concatenated `source_labels`.
* `drop`: Drop targets for which `regex` matches the concatenated `source_labels`. * `drop`: Drop targets for which `regex` matches the concatenated `source_labels`.
* `hashmod`: Set `target_label` to the `modulus` of a hash of the concatenated `source_labels`. * `hashmod`: Set `target_label` to the `modulus` of a hash of the concatenated `source_labels`.
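A sketch of the new `uppercase` action in use, mirroring the fixture added to config_test.go earlier in this diff. The import paths are assumed from this repository's layout (github.com/prometheus/common/model, github.com/prometheus/prometheus/model/labels, and .../model/relabel):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"
)

func main() {
	cfg := &relabel.Config{
		Action:       relabel.Uppercase,
		Regex:        relabel.DefaultRelabelConfig.Regex,
		Replacement:  relabel.DefaultRelabelConfig.Replacement,
		Separator:    relabel.DefaultRelabelConfig.Separator,
		SourceLabels: model.LabelNames{"instance"},
		TargetLabel:  "instance",
	}
	lset := relabel.Process(labels.FromStrings("instance", "node-1:9100"), cfg)
	fmt.Println(lset) // {instance="NODE-1:9100"}
}
```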

View file

@ -76,6 +76,7 @@ versions.
| graphLink | expr | string | Returns path to graph view in the [expression browser](https://prometheus.io/docs/visualization/browser/) for the expression. | | graphLink | expr | string | Returns path to graph view in the [expression browser](https://prometheus.io/docs/visualization/browser/) for the expression. |
| tableLink | expr | string | Returns path to tabular ("Table") view in the [expression browser](https://prometheus.io/docs/visualization/browser/) for the expression. | | tableLink | expr | string | Returns path to tabular ("Table") view in the [expression browser](https://prometheus.io/docs/visualization/browser/) for the expression. |
| parseDuration | string | float | Parses a duration string such as "1h" into the number of seconds it represents. | | parseDuration | string | float | Parses a duration string such as "1h" into the number of seconds it represents. |
| stripDomain | string | string | Removes the domain part of an FQDN. Leaves the port untouched. |
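For example, assuming typical console-template usage, `{{ "node1.example.org:9100" | stripDomain }}` should render as `node1:9100` (hostname kept, domain dropped, port preserved).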
### Others ### Others

View file

@ -55,7 +55,7 @@ timestamps are always represented as Unix timestamps in seconds.
* `<series_selector>`: Prometheus [time series * `<series_selector>`: Prometheus [time series
selectors](basics.md#time-series-selectors) like `http_requests_total` or selectors](basics.md#time-series-selectors) like `http_requests_total` or
`http_requests_total{method=~"(GET|POST)"}` and need to be URL-encoded. `http_requests_total{method=~"(GET|POST)"}` and need to be URL-encoded.
* `<duration>`: [Prometheus duration strings](basics.md#time_durations). * `<duration>`: [Prometheus duration strings](basics.md#time-durations).
For example, `5m` refers to a duration of 5 minutes. For example, `5m` refers to a duration of 5 minutes.
* `<bool>`: boolean values (strings `true` and `false`). * `<bool>`: boolean values (strings `true` and `false`).

View file

@ -17,8 +17,8 @@ Return all time series with the metric `http_requests_total` and the given
http_requests_total{job="apiserver", handler="/api/comments"} http_requests_total{job="apiserver", handler="/api/comments"}
Return a whole range of time (in this case 5 minutes) for the same vector, Return a whole range of time (in this case 5 minutes up to the query time)
making it a range vector: for the same vector, making it a [range vector](../basics/#range-vector-selectors):
http_requests_total{job="apiserver", handler="/api/comments"}[5m] http_requests_total{job="apiserver", handler="/api/comments"}[5m]

View file

@ -18,7 +18,6 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net" "net"
"net/http" "net/http"
"os" "os"
@ -101,11 +100,11 @@ func (d *discovery) parseServiceNodes(resp *http.Response, name string) (*target
} }
defer func() { defer func() {
io.Copy(ioutil.Discard, resp.Body) io.Copy(io.Discard, resp.Body)
resp.Body.Close() resp.Body.Close()
}() }()
b, err := ioutil.ReadAll(resp.Body) b, err := io.ReadAll(resp.Body)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -168,8 +167,8 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
continue continue
} }
b, err := ioutil.ReadAll(resp.Body) b, err := io.ReadAll(resp.Body)
io.Copy(ioutil.Discard, resp.Body) io.Copy(io.Discard, resp.Body)
resp.Body.Close() resp.Body.Close()
if err != nil { if err != nil {
level.Error(d.logger).Log("msg", "Error reading services list", "err", err) level.Error(d.logger).Log("msg", "Error reading services list", "err", err)

View file

@ -18,7 +18,6 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"reflect" "reflect"
@ -117,7 +116,7 @@ func (a *Adapter) writeOutput() error {
b, _ := json.MarshalIndent(arr, "", " ") b, _ := json.MarshalIndent(arr, "", " ")
dir, _ := filepath.Split(a.output) dir, _ := filepath.Split(a.output)
tmpfile, err := ioutil.TempFile(dir, "sd-adapter") tmpfile, err := os.CreateTemp(dir, "sd-adapter")
if err != nil { if err != nil {
return err return err
} }

View file

@ -15,7 +15,6 @@ package adapter
import ( import (
"context" "context"
"io/ioutil"
"os" "os"
"testing" "testing"
@ -223,7 +222,7 @@ func TestGenerateTargetGroups(t *testing.T) {
// TestWriteOutput checks the adapter can write a file to disk. // TestWriteOutput checks the adapter can write a file to disk.
func TestWriteOutput(t *testing.T) { func TestWriteOutput(t *testing.T) {
ctx := context.Background() ctx := context.Background()
tmpfile, err := ioutil.TempFile("", "sd_adapter_test") tmpfile, err := os.CreateTemp("", "sd_adapter_test")
require.NoError(t, err) require.NoError(t, err)
defer os.Remove(tmpfile.Name()) defer os.Remove(tmpfile.Name())
tmpfile.Close() tmpfile.Close()

View file

@ -14,7 +14,7 @@
package influxdb package influxdb
import ( import (
"io/ioutil" "io"
"math" "math"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@ -76,7 +76,7 @@ testmetric,test_label=test_label_value2 value=5.1234 123456789123
func(w http.ResponseWriter, r *http.Request) { func(w http.ResponseWriter, r *http.Request) {
require.Equal(t, "POST", r.Method, "Unexpected method.") require.Equal(t, "POST", r.Method, "Unexpected method.")
require.Equal(t, "/write", r.URL.Path, "Unexpected path.") require.Equal(t, "/write", r.URL.Path, "Unexpected path.")
b, err := ioutil.ReadAll(r.Body) b, err := io.ReadAll(r.Body)
require.NoError(t, err, "Error reading body.") require.NoError(t, err, "Error reading body.")
require.Equal(t, expectedBody, string(b), "Unexpected request body.") require.Equal(t, expectedBody, string(b), "Unexpected request body.")
}, },

View file

@ -16,7 +16,7 @@ package main
import ( import (
"fmt" "fmt"
"io/ioutil" "io"
"net/http" "net/http"
_ "net/http/pprof" _ "net/http/pprof"
"net/url" "net/url"
@ -234,7 +234,7 @@ func serve(logger log.Logger, addr string, writers []writer, readers []reader) e
}) })
http.HandleFunc("/read", func(w http.ResponseWriter, r *http.Request) { http.HandleFunc("/read", func(w http.ResponseWriter, r *http.Request) {
compressed, err := ioutil.ReadAll(r.Body) compressed, err := io.ReadAll(r.Body)
if err != nil { if err != nil {
level.Error(logger).Log("msg", "Read error", "err", err.Error()) level.Error(logger).Log("msg", "Read error", "err", err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)

View file

@ -18,7 +18,6 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"io" "io"
"io/ioutil"
"math" "math"
"net/http" "net/http"
"net/url" "net/url"
@ -116,7 +115,7 @@ func (c *Client) Write(samples model.Samples) error {
return err return err
} }
defer func() { defer func() {
io.Copy(ioutil.Discard, resp.Body) io.Copy(io.Discard, resp.Body)
resp.Body.Close() resp.Body.Close()
}() }()
@ -128,7 +127,7 @@ func (c *Client) Write(samples model.Samples) error {
// API returns status code 400 on error, encoding error details in the // API returns status code 400 on error, encoding error details in the
// response content in JSON. // response content in JSON.
buf, err = ioutil.ReadAll(resp.Body) buf, err = io.ReadAll(resp.Body)
if err != nil { if err != nil {
return err return err
} }

94
go.mod
View file

@ -18,7 +18,7 @@ require (
github.com/digitalocean/godo v1.78.0 github.com/digitalocean/godo v1.78.0
github.com/docker/docker v20.10.14+incompatible github.com/docker/docker v20.10.14+incompatible
github.com/edsrzf/mmap-go v1.1.0 github.com/edsrzf/mmap-go v1.1.0
github.com/envoyproxy/go-control-plane v0.10.1 github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1
github.com/envoyproxy/protoc-gen-validate v0.6.7 github.com/envoyproxy/protoc-gen-validate v0.6.7
github.com/fsnotify/fsnotify v1.5.1 github.com/fsnotify/fsnotify v1.5.1
github.com/go-kit/log v0.2.0 github.com/go-kit/log v0.2.0
@ -37,7 +37,7 @@ require (
github.com/hetznercloud/hcloud-go v1.33.1 github.com/hetznercloud/hcloud-go v1.33.1
github.com/json-iterator/go v1.1.12 github.com/json-iterator/go v1.1.12
github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b
github.com/linode/linodego v1.4.0 github.com/linode/linodego v1.4.1
github.com/miekg/dns v1.1.48 github.com/miekg/dns v1.1.48
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
@ -48,7 +48,7 @@ require (
github.com/prometheus/alertmanager v0.24.0 github.com/prometheus/alertmanager v0.24.0
github.com/prometheus/client_golang v1.12.1 github.com/prometheus/client_golang v1.12.1
github.com/prometheus/client_model v0.2.0 github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.33.0 github.com/prometheus/common v0.34.0
github.com/prometheus/common/assets v0.1.0 github.com/prometheus/common/assets v0.1.0
github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/common/sigv4 v0.1.0
github.com/prometheus/exporter-toolkit v0.7.1 github.com/prometheus/exporter-toolkit v0.7.1
@ -56,30 +56,30 @@ require (
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
github.com/stretchr/testify v1.7.1 github.com/stretchr/testify v1.7.1
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0
go.opentelemetry.io/otel v1.6.1 go.opentelemetry.io/otel v1.7.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.6.1 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.6.1
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.6.1 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.6.1
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.6.1 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.6.1
go.opentelemetry.io/otel/sdk v1.6.1 go.opentelemetry.io/otel/sdk v1.6.1
go.opentelemetry.io/otel/trace v1.6.1 go.opentelemetry.io/otel/trace v1.7.0
go.uber.org/atomic v1.9.0 go.uber.org/atomic v1.9.0
go.uber.org/automaxprocs v1.4.0 go.uber.org/automaxprocs v1.5.1
go.uber.org/goleak v1.1.12 go.uber.org/goleak v1.1.12
golang.org/x/net v0.0.0-20220325170049-de3da57026de golang.org/x/net v0.0.0-20220412020605-290c469a71a5
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886 golang.org/x/sys v0.0.0-20220412211240-33da011f77ad
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 golang.org/x/time v0.0.0-20220224211638-0e9765cccd65
golang.org/x/tools v0.1.10 golang.org/x/tools v0.1.10
google.golang.org/api v0.74.0 google.golang.org/api v0.77.0
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4
google.golang.org/grpc v1.45.0 google.golang.org/grpc v1.46.0
google.golang.org/protobuf v1.28.0 google.golang.org/protobuf v1.28.0
gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
k8s.io/api v0.23.5 k8s.io/api v0.23.6
k8s.io/apimachinery v0.23.5 k8s.io/apimachinery v0.23.6
k8s.io/client-go v0.23.5 k8s.io/client-go v0.23.5
k8s.io/klog v1.0.0 k8s.io/klog v1.0.0
k8s.io/klog/v2 v2.40.1 k8s.io/klog/v2 v2.40.1
@ -124,3 +124,69 @@ exclude (
k8s.io/client-go v9.0.0+incompatible k8s.io/client-go v9.0.0+incompatible
k8s.io/client-go v9.0.0-invalid+incompatible k8s.io/client-go v9.0.0-invalid+incompatible
) )
retract (
v2.5.0+incompatible
v2.5.0-rc.2+incompatible
v2.5.0-rc.1+incompatible
v2.5.0-rc.0+incompatible
v2.4.3+incompatible
v2.4.2+incompatible
v2.4.1+incompatible
v2.4.0+incompatible
v2.4.0-rc.0+incompatible
v2.3.2+incompatible
v2.3.1+incompatible
v2.3.0+incompatible
v2.2.1+incompatible
v2.2.0+incompatible
v2.2.0-rc.1+incompatible
v2.2.0-rc.0+incompatible
v2.1.0+incompatible
v2.0.0+incompatible
v2.0.0-rc.3+incompatible
v2.0.0-rc.2+incompatible
v2.0.0-rc.1+incompatible
v2.0.0-rc.0+incompatible
v2.0.0-beta.5+incompatible
v2.0.0-beta.4+incompatible
v2.0.0-beta.3+incompatible
v2.0.0-beta.2+incompatible
v2.0.0-beta.1+incompatible
v2.0.0-beta.0+incompatible
v2.0.0-alpha.3+incompatible
v2.0.0-alpha.2+incompatible
v2.0.0-alpha.1+incompatible
v2.0.0-alpha.0+incompatible
v1.8.2
v1.8.1
v1.8.0
v1.7.2
v1.7.1
v1.7.0
v1.6.3
v1.6.2
v1.6.1
v1.6.0
v1.5.3
v1.5.2
v1.5.1
v1.5.0
v1.4.1
v1.4.0
v1.3.1
v1.3.0
v1.3.0-beta.0
v1.2.3
v1.2.2
v1.2.1
v1.2.0
v1.1.3
v1.1.2
v1.1.1
v1.1.0
v1.0.2
v1.0.1
v1.0.0
v1.0.0-rc.0
)
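(The retract block's purpose, as far as this diff shows, is to hide the pre-module v1.x tags and the +incompatible v2 tags from module version resolution, so that `go get github.com/prometheus/prometheus` resolves to a real module version rather than one of these legacy release tags.)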

59
go.sum
View file

@ -37,8 +37,9 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
cloud.google.com/go/compute v1.5.0 h1:b1zWmYuuHz7gO9kDcM/EpHGr06UgsYNRpNJzI2kFiLM=
cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
cloud.google.com/go/compute v1.6.0 h1:XdQIN5mdPTSBVwSIVDuY5e8ZzVAccsHvD3qTEz4zIps=
cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
@ -406,8 +407,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.1 h1:cgDRLG7bs59Zd+apAWuzLQL95obVYAymNJek76W3mgw= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0=
github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.6.7 h1:qcZcULcd/abmQg6dwigimCNEyi4gg31M/xaciQlDml8= github.com/envoyproxy/protoc-gen-validate v0.6.7 h1:qcZcULcd/abmQg6dwigimCNEyi4gg31M/xaciQlDml8=
github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
@ -658,8 +659,9 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
github.com/googleapis/gax-go/v2 v2.2.0 h1:s7jOdKSaksJVOxE0Y/S32otcfiP+UQ0cL8/GTKaONwE=
github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
github.com/googleapis/gax-go/v2 v2.3.0 h1:nRJtk3y8Fm770D42QV6T90ZnvFZyk7agSo3Q+Z9p3WI=
github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
@ -824,8 +826,8 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linode/linodego v1.4.0 h1:jayhYLQOWxvr9fchCuB+OWTL+LP3OUPgdr/bc4YLdls= github.com/linode/linodego v1.4.1 h1:cgpY1jCZ47wfJvWH5V8in7Tphj8T0sR1URiH9e6G2bA=
github.com/linode/linodego v1.4.0/go.mod h1:PVsRxSlOiJyvG4/scTszpmZDTdgS+to3X6eS8pRrWI8= github.com/linode/linodego v1.4.1/go.mod h1:PVsRxSlOiJyvG4/scTszpmZDTdgS+to3X6eS8pRrWI8=
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
@ -1026,6 +1028,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus/alertmanager v0.24.0 h1:HBWR3lk4uy3ys+naDZthDdV7yEsxpaNeZuUS+hJgrOw= github.com/prometheus/alertmanager v0.24.0 h1:HBWR3lk4uy3ys+naDZthDdV7yEsxpaNeZuUS+hJgrOw=
github.com/prometheus/alertmanager v0.24.0/go.mod h1:r6fy/D7FRuZh5YbnX6J3MBY0eI4Pb5yPYS7/bPSXXqI= github.com/prometheus/alertmanager v0.24.0/go.mod h1:r6fy/D7FRuZh5YbnX6J3MBY0eI4Pb5yPYS7/bPSXXqI=
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
@ -1061,8 +1065,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.33.0 h1:rHgav/0a6+uYgGdNt3jwz8FNSesO/Hsang3O0T9A5SE= github.com/prometheus/common v0.34.0 h1:RBmGO9d/FVjqHT0yUGQwBJhkwKV+wPCn7KGpvfab0uE=
github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= github.com/prometheus/common v0.34.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE=
github.com/prometheus/common/assets v0.1.0 h1:8WlWPDRjbfff4FWCBjaUF0NEIgDD2Mv2anoKfwG+Ums= github.com/prometheus/common/assets v0.1.0 h1:8WlWPDRjbfff4FWCBjaUF0NEIgDD2Mv2anoKfwG+Ums=
github.com/prometheus/common/assets v0.1.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/assets v0.1.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
@ -1253,8 +1257,9 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:
go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs=
go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOUXMTQQ=
-go.opentelemetry.io/otel v1.6.1 h1:6r1YrcTenBvYa1x491d0GGpTVBsNECmrc/K6b+zDeis=
go.opentelemetry.io/otel v1.6.1/go.mod h1:blzUabWHkX6LJewxvadmzafgh/wnvBSDBdOuwkAtrWQ=
+go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM=
+go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk=
go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg=
go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4=
@@ -1282,8 +1287,9 @@ go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk=
go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE=
-go.opentelemetry.io/otel/trace v1.6.1 h1:f8c93l5tboBYZna1nWk0W9DYyMzJXDWdZcJZ0Kb400U=
go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0=
+go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o=
+go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ=
go.opentelemetry.io/proto/otlp v0.12.1 h1:kfx2sboxOGFvGJcH2C408CiVo2wVHC2av2XHNqj4vEg=
@@ -1294,8 +1300,8 @@ go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/automaxprocs v1.4.0 h1:CpDZl6aOlLhReez+8S3eEotD7Jx0Os++lemPlMULQP0=
+go.uber.org/automaxprocs v1.5.1 h1:e1YG66Lrk73dn4qhg8WFSvhF0JuFQF0ERIp4rpuV8Qk=
-go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q=
+go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
@@ -1439,8 +1445,9 @@ golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220325170049-de3da57026de h1:pZB1TWnKi+o4bENlbzAgLrEbY4RMYmUIRobMcSmfeYc=
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220412020605-290c469a71a5 h1:bRb386wvrE+oBNdF1d/Xh9mQrfQ4ecYhW5qJ5GvTGT4=
+golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1458,8 +1465,9 @@ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a h1:qfl7ob3DIEs3Ml9oLuPwY2N04gymzAW04WsUQHIClgM=
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 h1:OSnWWcOd/CtWQC2cYSBgbTSJv3ciqd8r54ySIW2y3RE=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1596,8 +1604,9 @@ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886 h1:eJv7u3ksNXoLbGSKuv2s/SIO4tJVxc/A+MTpzxDgz/Q=
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1704,8 +1713,9 @@ golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U=
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
@@ -1740,8 +1750,9 @@ google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tD
google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
-google.golang.org/api v0.74.0 h1:ExR2D+5TYIrMphWgs5JCgwRhEDlPDXXrLwHHMgPHTXE=
google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
+google.golang.org/api v0.77.0 h1:msijLTxwkJ7Jub5tv9KBVCKtHOQwnvnvkX7ErFFCVxY=
+google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1823,8 +1834,11 @@ google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2
google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
-google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb h1:0m9wktIpOxGw+SSKmydXWB3Z3GTfcPP6+q75HCQa6HI=
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
+google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4 h1:myaecH64R0bIEDjNORIel4iXubqzaHU1K2z8ajBwWcM=
+google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -1861,8 +1875,9 @@ google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
-google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc v1.46.0 h1:oCjezcn6g6A75TGoKYBPgKmVBLexhYLM6MebdrPApP8=
+google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -1938,15 +1953,17 @@ k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs=
-k8s.io/api v0.23.5 h1:zno3LUiMubxD/V1Zw3ijyKO3wxrhbUF1Ck+VjBvfaoA=
k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8=
+k8s.io/api v0.23.6 h1:yOK34wbYECH4RsJbQ9sfkFK3O7f/DUHRlzFehkqZyVw=
+k8s.io/api v0.23.6/go.mod h1:1kFaYxGCFHYp3qd6a85DAj/yW8aVD6XLZMqJclkoi9g=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U=
-k8s.io/apimachinery v0.23.5 h1:Va7dwhp8wgkUPWsEXk6XglXWU4IKYLKNlv8VkX7SDM0=
k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
+k8s.io/apimachinery v0.23.6 h1:RH1UweWJkWNTlFx0D8uxOpaU1tjIOvVVWV/bu5b3/NQ=
+k8s.io/apimachinery v0.23.6/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
View file
@@ -54,6 +54,10 @@ const (
    LabelDrop Action = "labeldrop"
    // LabelKeep drops any label not matching the regex.
    LabelKeep Action = "labelkeep"
+   // Lowercase maps input letters to their lower case.
+   Lowercase Action = "lowercase"
+   // Uppercase maps input letters to their upper case.
+   Uppercase Action = "uppercase"
)
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -63,7 +67,7 @@ func (a *Action) UnmarshalYAML(unmarshal func(interface{}) error) error {
        return err
    }
    switch act := Action(strings.ToLower(s)); act {
-   case Replace, Keep, Drop, HashMod, LabelMap, LabelDrop, LabelKeep:
+   case Replace, Keep, Drop, HashMod, LabelMap, LabelDrop, LabelKeep, Lowercase, Uppercase:
        *a = act
        return nil
    }
@@ -106,12 +110,15 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
    if c.Modulus == 0 && c.Action == HashMod {
        return errors.Errorf("relabel configuration for hashmod requires non-zero modulus")
    }
-   if (c.Action == Replace || c.Action == HashMod) && c.TargetLabel == "" {
+   if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase) && c.TargetLabel == "" {
        return errors.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action)
    }
-   if c.Action == Replace && !relabelTarget.MatchString(c.TargetLabel) {
+   if (c.Action == Replace || c.Action == Lowercase || c.Action == Uppercase) && !relabelTarget.MatchString(c.TargetLabel) {
        return errors.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
    }
+   if (c.Action == Lowercase || c.Action == Uppercase) && c.Replacement != DefaultRelabelConfig.Replacement {
+       return errors.Errorf("'replacement' can not be set for %s action", c.Action)
+   }
    if c.Action == LabelMap && !relabelTarget.MatchString(c.Replacement) {
        return errors.Errorf("%q is invalid 'replacement' for %s action", c.Replacement, c.Action)
    }
@@ -228,6 +235,10 @@ func relabel(lset labels.Labels, cfg *Config) labels.Labels {
            break
        }
        lb.Set(string(target), string(res))
+   case Lowercase:
+       lb.Set(cfg.TargetLabel, strings.ToLower(val))
+   case Uppercase:
+       lb.Set(cfg.TargetLabel, strings.ToUpper(val))
    case HashMod:
        mod := sum64(md5.Sum([]byte(val))) % cfg.Modulus
        lb.Set(cfg.TargetLabel, fmt.Sprintf("%d", mod))
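For a concrete feel of the two new actions, here is a minimal, runnable sketch of driving them through the model/relabel package (the import paths follow the model/... layout this commit references; the label names and values are invented for illustration):

package main

import (
    "fmt"

    "github.com/prometheus/common/model"
    "github.com/prometheus/prometheus/model/labels"
    "github.com/prometheus/prometheus/model/relabel"
)

func main() {
    // Invented input series for illustration.
    lset := labels.FromStrings("instance", "Node-A:9100")

    cfgs := []*relabel.Config{
        {
            SourceLabels: model.LabelNames{"instance"},
            Action:       relabel.Lowercase,
            TargetLabel:  "instance_lower",
        },
        {
            SourceLabels: model.LabelNames{"instance"},
            Action:       relabel.Uppercase,
            TargetLabel:  "instance_upper",
        },
    }

    // Per the switch above, both actions only join the source label values
    // and write the case-mapped result to TargetLabel; Regex is not used.
    fmt.Println(relabel.Process(lset, cfgs...))
}

Note also, per the validation above, that setting replacement together with either action is rejected at config load time.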
View file
@@ -428,6 +428,28 @@ func TestRelabel(t *testing.T) {
                "a": "foo",
            }),
        },
+       {
+           input: labels.FromMap(map[string]string{
+               "foo": "bAr123Foo",
+           }),
+           relabel: []*Config{
+               {
+                   SourceLabels: model.LabelNames{"foo"},
+                   Action:       Uppercase,
+                   TargetLabel:  "foo_uppercase",
+               },
+               {
+                   SourceLabels: model.LabelNames{"foo"},
+                   Action:       Lowercase,
+                   TargetLabel:  "foo_lowercase",
+               },
+           },
+           output: labels.FromMap(map[string]string{
+               "foo":           "bAr123Foo",
+               "foo_lowercase": "bar123foo",
+               "foo_uppercase": "BAR123FOO",
+           }),
+       },
    }
    for _, test := range tests {
View file
@@ -17,7 +17,7 @@ import (
    "bytes"
    "context"
    "io"
-   "io/ioutil"
+   "os"
    "strings"
    "time"
@@ -304,7 +304,7 @@ func Parse(content []byte) (*RuleGroups, []error) {
// ParseFile reads and parses rules from a file.
func ParseFile(file string) (*RuleGroups, []error) {
-   b, err := ioutil.ReadFile(file)
+   b, err := os.ReadFile(file)
    if err != nil {
        return nil, []error{errors.Wrap(err, file)}
    }
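Many of the hunks in this commit are the same mechanical migration: Go 1.16 deprecated io/ioutil, whose helpers are now thin wrappers around io and os. A runnable sketch of the mapping used throughout, with placeholder file names:

package main

import (
    "io"
    "os"
    "strings"
)

func main() {
    // ioutil.ReadFile -> os.ReadFile (placeholder file name).
    if b, err := os.ReadFile("example.yaml"); err == nil {
        _ = b
    }
    // ioutil.ReadAll -> io.ReadAll.
    if b, err := io.ReadAll(strings.NewReader("payload")); err == nil {
        _ = b
    }
    // io.Copy(ioutil.Discard, ...) -> io.Copy(io.Discard, ...).
    io.Copy(io.Discard, strings.NewReader("drained before close"))
    // ioutil.TempDir -> os.MkdirTemp; ioutil.TempFile -> os.CreateTemp.
    if dir, err := os.MkdirTemp("", "example"); err == nil {
        os.RemoveAll(dir)
    }
    if f, err := os.CreateTemp("", "example"); err == nil {
        f.Close()
        os.Remove(f.Name())
    }
    // ioutil.NopCloser -> io.NopCloser.
    _ = io.NopCloser(strings.NewReader(""))
}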
View file
@@ -17,7 +17,6 @@ import (
    "bytes"
    "compress/gzip"
    "io"
-   "io/ioutil"
    "os"
    "testing"
@@ -361,7 +360,7 @@ func BenchmarkParse(b *testing.B) {
    require.NoError(b, err)
    defer f.Close()
-   buf, err := ioutil.ReadAll(f)
+   buf, err := io.ReadAll(f)
    require.NoError(b, err)
    b.Run(parserName+"/no-decode-metric/"+fn, func(b *testing.B) {
@@ -501,7 +500,7 @@ func BenchmarkGzip(b *testing.B) {
    require.NoError(b, err)
    require.NoError(b, gw.Close())
-   gbuf, err := ioutil.ReadAll(&buf)
+   gbuf, err := io.ReadAll(&buf)
    require.NoError(b, err)
    k := b.N / promtestdataSampleCount
@@ -516,7 +515,7 @@ func BenchmarkGzip(b *testing.B) {
    gr, err := gzip.NewReader(bytes.NewReader(gbuf))
    require.NoError(b, err)
-   d, err := ioutil.ReadAll(gr)
+   d, err := io.ReadAll(gr)
    require.NoError(b, err)
    require.NoError(b, gr.Close())
View file
@@ -19,7 +19,6 @@ import (
    "encoding/json"
    "fmt"
    "io"
-   "io/ioutil"
    "net"
    "net/http"
    "net/url"
@@ -583,7 +582,7 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b
        return err
    }
    defer func() {
-       io.Copy(ioutil.Discard, resp.Body)
+       io.Copy(io.Discard, resp.Body)
        resp.Body.Close()
    }()
View file
@@ -18,7 +18,7 @@ import (
    "context"
    "encoding/json"
    "fmt"
-   "io/ioutil"
+   "io"
    "net/http"
    "net/http/httptest"
    "net/url"
@@ -126,7 +126,7 @@ func TestHandlerSendAll(t *testing.T) {
            return
        }
-       b, err := ioutil.ReadAll(r.Body)
+       b, err := io.ReadAll(r.Body)
        if err != nil {
            err = errors.Errorf("error reading body: %v", err)
            w.WriteHeader(http.StatusInternalServerError)
@@ -221,7 +221,7 @@ func TestCustomDo(t *testing.T) {
    h := NewManager(&Options{
        Do: func(_ context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
            received = true
-           body, err := ioutil.ReadAll(req.Body)
+           body, err := io.ReadAll(req.Body)
            require.NoError(t, err)
@@ -230,7 +230,7 @@ func TestCustomDo(t *testing.T) {
            require.Equal(t, testURL, req.URL.String())
            return &http.Response{
-               Body: ioutil.NopCloser(bytes.NewBuffer(nil)),
+               Body: io.NopCloser(bytes.NewBuffer(nil)),
            }, nil
        },
    }, nil)
@@ -331,7 +331,7 @@ func TestHandlerQueuing(t *testing.T) {
        case expected := <-expectedc:
            var alerts []*Alert
-           b, err := ioutil.ReadAll(r.Body)
+           b, err := io.ReadAll(r.Body)
            if err != nil {
                panic(err)
            }
View file
@@ -18,7 +18,6 @@ package main
import (
    "fmt"
-   "io/ioutil"
    "log"
    "os"
    "path"
@@ -30,7 +29,7 @@ import (
//go:generate go run generate.go
func main() {
-   data, err := ioutil.ReadFile(filepath.Join("..", "plugins.yml"))
+   data, err := os.ReadFile(filepath.Join("..", "plugins.yml"))
    if err != nil {
        log.Fatal(err)
    }
View file
@@ -127,7 +127,7 @@ func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) {
type MetricMetadata struct {
    // Represents the metric type, these match the set from Prometheus.
-   // Refer to pkg/textparse/interface.go for details.
+   // Refer to model/textparse/interface.go for details.
    Type MetricMetadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=prometheus.MetricMetadata_MetricType" json:"type,omitempty"`
    MetricFamilyName string `protobuf:"bytes,2,opt,name=metric_family_name,json=metricFamilyName,proto3" json:"metric_family_name,omitempty"`
    Help string `protobuf:"bytes,4,opt,name=help,proto3" json:"help,omitempty"`
@@ -200,7 +200,7 @@ func (m *MetricMetadata) GetUnit() string {
type Sample struct {
    Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
-   // timestamp is in ms format, see pkg/timestamp/timestamp.go for
+   // timestamp is in ms format, see model/timestamp/timestamp.go for
    // conversion from time.Time to Prometheus timestamp.
    Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
@@ -259,7 +259,7 @@ type Exemplar struct {
    // Optional, can be empty.
    Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"`
    Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
-   // timestamp is in ms format, see pkg/timestamp/timestamp.go for
+   // timestamp is in ms format, see model/timestamp/timestamp.go for
    // conversion from time.Time to Prometheus timestamp.
    Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
View file
@@ -31,7 +31,7 @@ message MetricMetadata {
    }
    // Represents the metric type, these match the set from Prometheus.
-   // Refer to pkg/textparse/interface.go for details.
+   // Refer to model/textparse/interface.go for details.
    MetricType type = 1;
    string metric_family_name = 2;
    string help = 4;
@@ -40,7 +40,7 @@ message MetricMetadata {
message Sample {
    double value = 1;
-   // timestamp is in ms format, see pkg/timestamp/timestamp.go for
+   // timestamp is in ms format, see model/timestamp/timestamp.go for
    // conversion from time.Time to Prometheus timestamp.
    int64 timestamp = 2;
}
@@ -49,7 +49,7 @@ message Exemplar {
    // Optional, can be empty.
    repeated Label labels = 1 [(gogoproto.nullable) = false];
    double value = 2;
-   // timestamp is in ms format, see pkg/timestamp/timestamp.go for
+   // timestamp is in ms format, see model/timestamp/timestamp.go for
    // conversion from time.Time to Prometheus timestamp.
    int64 timestamp = 3;
}
View file
@@ -1401,7 +1401,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
    enh.Ts = ts
    // Make the function call.
    outVec := call(inArgs, e.Args, enh)
-   ev.samplesStats.IncrementSamplesAtStep(step, len(points))
+   ev.samplesStats.IncrementSamplesAtStep(step, int64(len(points)))
    enh.Out = outVec[:0]
    if len(outVec) > 0 {
        ss.Points = append(ss.Points, Point{V: outVec[0].Point.V, H: outVec[0].Point.H, T: ts})
@@ -1793,7 +1793,7 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storag
    }
    ss.Points = ev.matrixIterSlice(it, mint, maxt, getPointSlice(16))
-   ev.samplesStats.IncrementSamplesAtTimestamp(ev.startTimestamp, len(ss.Points))
+   ev.samplesStats.IncrementSamplesAtTimestamp(ev.startTimestamp, int64(len(ss.Points)))
    if len(ss.Points) > 0 {
        matrix = append(matrix, ss)
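The stats counters widen from int to int64 because Go's int is only 32 bits on 32-bit platforms, where a long range query can overflow the running sample count. A simplified sketch of the pattern; the struct shape here is invented for illustration, only the widened field type matters:

package example

// querySamples is an invented stand-in for the promql/stats counter.
type querySamples struct {
    TotalSamples int64 // was int, which is 32-bit on GOARCH=386 and arm
}

// incrementSamplesAtStep mirrors the call sites above: the int length of a
// point slice is converted once, at the counter boundary.
func (qs *querySamples) incrementSamplesAtStep(n int) {
    qs.TotalSamples += int64(n)
}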
View file
@@ -17,7 +17,6 @@ import (
    "context"
    "errors"
    "fmt"
-   "io/ioutil"
    "math"
    "os"
    "sort"
@@ -46,7 +45,7 @@ func TestMain(m *testing.M) {
func TestQueryConcurrency(t *testing.T) {
    maxConcurrency := 10
-   dir, err := ioutil.TempDir("", "test_concurrency")
+   dir, err := os.MkdirTemp("", "test_concurrency")
    require.NoError(t, err)
    defer os.RemoveAll(dir)
    queryTracker := NewActiveQueryTracker(dir, maxConcurrency, nil)
@@ -751,7 +750,6 @@ load 10s
            metricWith3SampleEvery10Seconds{a="2",b="2"} 1+1x100
            metricWith3SampleEvery10Seconds{a="3",b="2"} 1+1x100
`)
    require.NoError(t, err)
    defer test.Close()
@@ -761,7 +759,7 @@ load 10s
    cases := []struct {
        Query               string
        SkipMaxCheck        bool
-       TotalSamples        int
+       TotalSamples        int64
        TotalSamplesPerStep stats.TotalSamplesPerStep
        PeakSamples         int
        Start               time.Time
View file
@@ -15,7 +15,6 @@ package promql
import (
    "context"
-   "io/ioutil"
    "os"
    "testing"
@@ -105,7 +104,7 @@ func TestIndexReuse(t *testing.T) {
}
func TestMMapFile(t *testing.T) {
-   file, err := ioutil.TempFile("", "mmapedFile")
+   file, err := os.CreateTemp("", "mmapedFile")
    require.NoError(t, err)
    filename := file.Name()
View file
@@ -16,8 +16,8 @@ package promql
import (
    "context"
    "fmt"
-   "io/ioutil"
    "math"
+   "os"
    "strconv"
    "strings"
    "time"
@@ -78,7 +78,7 @@ func NewTest(t testutil.T, input string) (*Test, error) {
}
func newTestFromFile(t testutil.T, filename string) (*Test, error) {
-   content, err := ioutil.ReadFile(filename)
+   content, err := os.ReadFile(filename)
    if err != nil {
        return nil, err
    }
View file
@@ -16,7 +16,6 @@ package rules
import (
    "context"
    "fmt"
-   "io/ioutil"
    "math"
    "os"
    "sort"
@@ -753,7 +752,7 @@ func TestUpdate(t *testing.T) {
    rgs, errs := rulefmt.ParseFile("fixtures/rules.yaml")
    require.Equal(t, 0, len(errs), "file parsing failures")
-   tmpFile, err := ioutil.TempFile("", "rules.test.*.yaml")
+   tmpFile, err := os.CreateTemp("", "rules.test.*.yaml")
    require.NoError(t, err)
    defer os.Remove(tmpFile.Name())
    defer tmpFile.Close()
View file
@@ -124,6 +124,9 @@ func NewManager(o *Options, logger log.Logger, app storage.Appendable) *Manager
// Options are the configuration parameters to the scrape manager.
type Options struct {
    ExtraMetrics bool
+   // Option used by downstream scraper users like OpenTelemetry Collector
+   // to help lookup metric metadata. Should be false for Prometheus.
+   PassMetadataInContext bool
    // Optional HTTP client options to use when scraping.
    HTTPClientOptions []config_util.HTTPClientOption
@@ -195,7 +198,7 @@ func (m *Manager) reload() {
            level.Error(m.logger).Log("msg", "error reloading target set", "err", "invalid config id:"+setName)
            continue
        }
-       sp, err := newScrapePool(scrapeConfig, m.append, m.jitterSeed, log.With(m.logger, "scrape_pool", setName), m.opts.ExtraMetrics, m.opts.HTTPClientOptions)
+       sp, err := newScrapePool(scrapeConfig, m.append, m.jitterSeed, log.With(m.logger, "scrape_pool", setName), m.opts.ExtraMetrics, m.opts.PassMetadataInContext, m.opts.HTTPClientOptions)
        if err != nil {
            level.Error(m.logger).Log("msg", "error creating new scrape pool", "err", err, "scrape_pool", setName)
            continue
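A rough sketch of what a downstream consumer gains from PassMetadataInContext. The getters used below (MetricMetadataStoreFromContext, TargetFromContext) are assumed counterparts of the ContextWith* setters that appear later in this diff; verify their exact names and signatures against the scrape package before relying on them:

package example

import (
    "context"

    "github.com/prometheus/prometheus/scrape"
)

// inspectScrapeContext is a hypothetical helper that a downstream
// storage.Appendable (for example an OpenTelemetry Collector receiver)
// could call from its Appender(ctx) method when PassMetadataInContext
// is enabled. Both getters are assumptions, not verified here.
func inspectScrapeContext(ctx context.Context) {
    if store, ok := scrape.MetricMetadataStoreFromContext(ctx); ok {
        _ = store // look up scraped HELP/TYPE/UNIT metadata here
    }
    if target, ok := scrape.TargetFromContext(ctx); ok {
        _ = target // the scraped target and its labels
    }
}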
View file
@@ -20,7 +20,6 @@ import (
    "context"
    "fmt"
    "io"
-   "io/ioutil"
    "math"
    "net/http"
    "reflect"
@@ -266,7 +265,7 @@ const maxAheadTime = 10 * time.Minute
type labelsMutator func(labels.Labels) labels.Labels
-func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, reportExtraMetrics bool, httpOpts []config_util.HTTPClientOption) (*scrapePool, error) {
+func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, reportExtraMetrics, passMetadataInContext bool, httpOpts []config_util.HTTPClientOption) (*scrapePool, error) {
    targetScrapePools.Inc()
    if logger == nil {
        logger = log.NewNopLogger()
@@ -299,12 +298,8 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
        }
        opts.target.SetMetadataStore(cache)
-       // Store the cache in the context.
-       loopCtx := ContextWithMetricMetadataStore(ctx, cache)
-       loopCtx = ContextWithTarget(loopCtx, opts.target)
        return newScrapeLoop(
-           loopCtx,
+           ctx,
            opts.scraper,
            log.With(logger, "target", opts.target),
            buffers,
@@ -321,6 +316,9 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
            opts.interval,
            opts.timeout,
            reportExtraMetrics,
+           opts.target,
+           cache,
+           passMetadataInContext,
        )
    }
@@ -620,7 +618,7 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
    if limits.labelLimit > 0 {
        nbLabels := len(lset)
        if nbLabels > int(limits.labelLimit) {
-           return fmt.Errorf("label_limit exceeded (metric: %.50s, number of label: %d, limit: %d)", met, nbLabels, limits.labelLimit)
+           return fmt.Errorf("label_limit exceeded (metric: %.50s, number of labels: %d, limit: %d)", met, nbLabels, limits.labelLimit)
        }
    }
@@ -632,14 +630,14 @@ func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
        if limits.labelNameLengthLimit > 0 {
            nameLength := len(l.Name)
            if nameLength > int(limits.labelNameLengthLimit) {
-               return fmt.Errorf("label_name_length_limit exceeded (metric: %.50s, label: %.50v, name length: %d, limit: %d)", met, l, nameLength, limits.labelNameLengthLimit)
+               return fmt.Errorf("label_name_length_limit exceeded (metric: %.50s, label name: %.50s, length: %d, limit: %d)", met, l.Name, nameLength, limits.labelNameLengthLimit)
            }
        }
        if limits.labelValueLengthLimit > 0 {
            valueLength := len(l.Value)
            if valueLength > int(limits.labelValueLengthLimit) {
-               return fmt.Errorf("label_value_length_limit exceeded (metric: %.50s, label: %.50v, value length: %d, limit: %d)", met, l, valueLength, limits.labelValueLengthLimit)
+               return fmt.Errorf("label_value_length_limit exceeded (metric: %.50s, label name: %.50s, value: %.50q, length: %d, limit: %d)", met, l.Name, l.Value, valueLength, limits.labelValueLengthLimit)
            }
        }
    }
@@ -778,7 +776,7 @@ func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error)
        return "", err
    }
    defer func() {
-       io.Copy(ioutil.Discard, resp.Body)
+       io.Copy(io.Discard, resp.Body)
        resp.Body.Close()
    }()
@@ -861,10 +859,11 @@ type scrapeLoop struct {
    sampleMutator       labelsMutator
    reportSampleMutator labelsMutator
    parentCtx context.Context
-   ctx       context.Context
-   cancel    func()
-   stopped   chan struct{}
+   appenderCtx context.Context
+   ctx         context.Context
+   cancel      func()
+   stopped     chan struct{}
    disabledEndOfRunStalenessMarkers bool
@@ -1130,6 +1129,9 @@ func newScrapeLoop(ctx context.Context,
    interval time.Duration,
    timeout time.Duration,
    reportExtraMetrics bool,
+   target *Target,
+   metricMetadataStore MetricMetadataStore,
+   passMetadataInContext bool,
) *scrapeLoop {
    if l == nil {
        l = log.NewNopLogger()
@@ -1140,6 +1142,18 @@ func newScrapeLoop(ctx context.Context,
    if cache == nil {
        cache = newScrapeCache()
    }
+   appenderCtx := ctx
+   if passMetadataInContext {
+       // Store the cache and target in the context. This is then used by downstream OTel Collector
+       // to lookup the metadata required to process the samples. Not used by Prometheus itself.
+       // TODO(gouthamve) We're using a dedicated context because using the parentCtx caused a memory
+       // leak. We should ideally fix the main leak. See: https://github.com/prometheus/prometheus/pull/10590
+       appenderCtx = ContextWithMetricMetadataStore(appenderCtx, cache)
+       appenderCtx = ContextWithTarget(appenderCtx, target)
+   }
    sl := &scrapeLoop{
        scraper: sc,
        buffers: buffers,
@@ -1151,6 +1165,7 @@ func newScrapeLoop(ctx context.Context,
        jitterSeed:      jitterSeed,
        l:               l,
        parentCtx:       ctx,
+       appenderCtx:     appenderCtx,
        honorTimestamps: honorTimestamps,
        sampleLimit:     sampleLimit,
        labelLimits:     labelLimits,
@@ -1229,7 +1244,7 @@ mainLoop:
// scrapeAndReport performs a scrape and then appends the result to the storage
// together with reporting metrics, by using as few appenders as possible.
// In the happy scenario, a single appender is used.
-// This function uses sl.parentCtx instead of sl.ctx on purpose. A scrape should
+// This function uses sl.appenderCtx instead of sl.ctx on purpose. A scrape should
// only be cancelled on shutdown, not on reloads.
func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- error) time.Time {
    start := time.Now()
@@ -1248,7 +1263,7 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er
    var total, added, seriesAdded, bytes int
    var err, appErr, scrapeErr error
-   app := sl.appender(sl.parentCtx)
+   app := sl.appender(sl.appenderCtx)
    defer func() {
        if err != nil {
            app.Rollback()
@@ -1271,7 +1286,7 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er
        // Add stale markers.
        if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil {
            app.Rollback()
-           app = sl.appender(sl.parentCtx)
+           app = sl.appender(sl.appenderCtx)
            level.Warn(sl.l).Log("msg", "Append failed", "err", err)
        }
        if errc != nil {
@@ -1310,13 +1325,13 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er
    total, added, seriesAdded, appErr = sl.append(app, b, contentType, appendTime)
    if appErr != nil {
        app.Rollback()
-       app = sl.appender(sl.parentCtx)
+       app = sl.appender(sl.appenderCtx)
        level.Debug(sl.l).Log("msg", "Append failed", "err", appErr)
        // The append failed, probably due to a parse error or sample limit.
        // Call sl.append again with an empty scrape to trigger stale markers.
        if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil {
            app.Rollback()
-           app = sl.appender(sl.parentCtx)
+           app = sl.appender(sl.appenderCtx)
            level.Warn(sl.l).Log("msg", "Append failed", "err", err)
        }
    }
@@ -1380,7 +1395,8 @@ func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, int
    // Call sl.append again with an empty scrape to trigger stale markers.
    // If the target has since been recreated and scraped, the
    // stale markers will be out of order and ignored.
-   app := sl.appender(sl.ctx)
+   // sl.context would have been cancelled, hence using sl.appenderCtx.
+   app := sl.appender(sl.appenderCtx)
    var err error
    defer func() {
        if err != nil {
@@ -1394,7 +1410,7 @@ func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, int
    }()
    if _, _, _, err = sl.append(app, []byte{}, "", staleTime); err != nil {
        app.Rollback()
-       app = sl.appender(sl.ctx)
+       app = sl.appender(sl.appenderCtx)
        level.Warn(sl.l).Log("msg", "Stale append failed", "err", err)
    }
    if err = sl.reportStale(app, staleTime); err != nil {
View file
@@ -19,7 +19,6 @@ import (
    "context"
    "fmt"
    "io"
-   "io/ioutil"
    "math"
    "net/http"
    "net/http/httptest"
@@ -58,7 +57,7 @@ func TestNewScrapePool(t *testing.T) {
    var (
        app   = &nopAppendable{}
        cfg   = &config.ScrapeConfig{}
-       sp, _ = newScrapePool(cfg, app, 0, nil, false, nil)
+       sp, _ = newScrapePool(cfg, app, 0, nil, false, false, nil)
    )
    if a, ok := sp.appendable.(*nopAppendable); !ok || a != app {
@@ -93,7 +92,7 @@ func TestDroppedTargetsList(t *testing.T) {
            },
        },
    }
-   sp, _ = newScrapePool(cfg, app, 0, nil, false, nil)
+   sp, _ = newScrapePool(cfg, app, 0, nil, false, false, nil)
    expectedLabelSetString = "{__address__=\"127.0.0.1:9090\", __scrape_interval__=\"0s\", __scrape_timeout__=\"0s\", job=\"dropMe\"}"
    expectedLength = 1
    )
@@ -456,7 +455,7 @@ func TestScrapePoolTargetLimit(t *testing.T) {
func TestScrapePoolAppender(t *testing.T) {
    cfg := &config.ScrapeConfig{}
    app := &nopAppendable{}
-   sp, _ := newScrapePool(cfg, app, 0, nil, false, nil)
+   sp, _ := newScrapePool(cfg, app, 0, nil, false, false, nil)
    loop := sp.newLoop(scrapeLoopOptions{
        target: &Target{},
@@ -498,7 +497,7 @@ func TestScrapePoolRaces(t *testing.T) {
    newConfig := func() *config.ScrapeConfig {
        return &config.ScrapeConfig{ScrapeInterval: interval, ScrapeTimeout: timeout}
    }
-   sp, _ := newScrapePool(newConfig(), &nopAppendable{}, 0, nil, false, nil)
+   sp, _ := newScrapePool(newConfig(), &nopAppendable{}, 0, nil, false, false, nil)
    tgts := []*targetgroup.Group{
        {
            Targets: []model.LabelSet{
@@ -592,6 +591,9 @@ func TestScrapeLoopStopBeforeRun(t *testing.T) {
        1,
        0,
        false,
+       nil,
+       nil,
+       false,
    )
    // The scrape pool synchronizes on stopping scrape loops. However, new scrape
@@ -661,6 +663,9 @@ func TestScrapeLoopStop(t *testing.T) {
        10*time.Millisecond,
        time.Hour,
        false,
+       nil,
+       nil,
+       false,
    )
    // Terminate loop after 2 scrapes.
@@ -733,6 +738,9 @@ func TestScrapeLoopRun(t *testing.T) {
        time.Second,
        time.Hour,
        false,
+       nil,
+       nil,
+       false,
    )
    // The loop must terminate during the initial offset if the context
@@ -785,6 +793,9 @@ func TestScrapeLoopRun(t *testing.T) {
        time.Second,
        100*time.Millisecond,
        false,
+       nil,
+       nil,
+       false,
    )
    go func() {
@@ -841,6 +852,9 @@ func TestScrapeLoopForcedErr(t *testing.T) {
        time.Second,
        time.Hour,
        false,
+       nil,
+       nil,
+       false,
    )
    forcedErr := fmt.Errorf("forced err")
@@ -896,6 +910,9 @@ func TestScrapeLoopMetadata(t *testing.T) {
        0,
        0,
        false,
+       nil,
+       nil,
+       false,
    )
    defer cancel()
@@ -950,6 +967,9 @@ func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) {
        0,
        0,
        false,
+       nil,
+       nil,
+       false,
    )
    t.Cleanup(func() { cancel() })
@@ -1040,6 +1060,9 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
        10*time.Millisecond,
        time.Hour,
        false,
+       nil,
+       nil,
+       false,
    )
    // Succeed once, several failures, then stop.
    numScrapes := 0
@@ -1099,6 +1122,9 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
        10*time.Millisecond,
        time.Hour,
        false,
+       nil,
+       nil,
+       false,
    )
    // Succeed once, several failures, then stop.
@@ -1162,6 +1188,9 @@ func TestScrapeLoopCache(t *testing.T) {
        10*time.Millisecond,
        time.Hour,
        false,
+       nil,
+       nil,
+       false,
    )
    numScrapes := 0
@@ -1241,6 +1270,9 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
        10*time.Millisecond,
        time.Hour,
        false,
+       nil,
+       nil,
+       false,
    )
    numScrapes := 0
@@ -1352,6 +1384,9 @@ func TestScrapeLoopAppend(t *testing.T) {
        0,
        0,
        false,
+       nil,
+       nil,
+       false,
    )
    now := time.Now()
@@ -1439,7 +1474,7 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
            return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil)
        },
        nil,
-       func(ctx context.Context) storage.Appender { return app }, nil, 0, true, 0, nil, 0, 0, false,
+       func(ctx context.Context) storage.Appender { return app }, nil, 0, true, 0, nil, 0, 0, false, nil, nil, false,
    )
    slApp := sl.appender(context.Background())
    _, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC))
@@ -1475,6 +1510,9 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
        0,
        0,
        false,
+       nil,
+       nil,
+       false,
    )
    fakeRef := storage.SeriesRef(1)
@@ -1530,6 +1568,9 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
        0,
        0,
        false,
+       nil,
+       nil,
+       false,
    )
    // Get the value of the Counter before performing the append.
@@ -1604,6 +1645,9 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) {
        0,
        0,
        false,
+       nil,
+       nil,
+       false,
    )
    now := time.Now()
@@ -1649,6 +1693,9 @@ func TestScrapeLoopAppendStaleness(t *testing.T) {
        0,
        0,
        false,
+       nil,
+       nil,
+       false,
    )
    now := time.Now()
@@ -1697,6 +1744,9 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
        0,
        0,
        false,
+       nil,
+       nil,
+       false,
    )
    now := time.Now()
@@ -1805,6 +1855,9 @@ metric_total{n="2"} 2 # {t="2"} 2.0 20000
        0,
        0,
        false,
+       nil,
+       nil,
+       false,
    )
    now := time.Now()
@@ -1867,6 +1920,9 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) {
        0,
        0,
        false,
+       nil,
+       nil,
+       false,
    )
    now := time.Now()
@@ -1916,6 +1972,9 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) {
        10*time.Millisecond,
        time.Hour,
        false,
+       nil,
+       nil,
+       false,
    )
    scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
@@ -1949,6 +2008,9 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
        10*time.Millisecond,
        time.Hour,
        false,
+       nil,
+       nil,
+       false,
    )
    scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
@@ -1995,6 +2057,9 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T
        0,
        0,
        false,
+       nil,
+       nil,
+       false,
    )
    now := time.Unix(1, 0)
@@ -2037,6 +2102,9 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) {
        0,
        0,
        false,
+       nil,
+       nil,
+       false,
    )
    now := time.Now().Add(20 * time.Minute)
@@ -2130,7 +2198,7 @@ func TestTargetScrapeScrapeCancel(t *testing.T) {
    }()
    go func() {
-       _, err := ts.scrape(ctx, ioutil.Discard)
+       _, err := ts.scrape(ctx, io.Discard)
        if err == nil {
            errc <- errors.New("Expected error but got nil")
        } else if ctx.Err() != context.Canceled {
@@ -2174,7 +2242,7 @@ func TestTargetScrapeScrapeNotFound(t *testing.T) {
        client: http.DefaultClient,
    }
-   _, err = ts.scrape(context.Background(), ioutil.Discard)
+   _, err = ts.scrape(context.Background(), io.Discard)
    require.Contains(t, err.Error(), "404", "Expected \"404 NotFound\" error but got: %s", err)
}
@@ -2291,6 +2359,9 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) {
        0,
        0,
        false,
+       nil,
+       nil,
+       false,
    )
    now := time.Now()
@@ -2329,6 +2400,9 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
        0,
        0,
        false,
+       nil,
+       nil,
+       false,
    )
    now := time.Now()
@@ -2366,6 +2440,9 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
        0,
        0,
        false,
+       nil,
+       nil,
+       false,
    )
    defer cancel()
@@ -2421,6 +2498,9 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) {
        0,
        0,
        false,
+       nil,
+       nil,
+       false,
    )
    defer cancel()
@@ -2513,7 +2593,7 @@ func TestReuseScrapeCache(t *testing.T) {
        ScrapeInterval: model.Duration(5 * time.Second),
        MetricsPath:    "/metrics",
    }
-   sp, _ = newScrapePool(cfg, app, 0, nil, false, nil)
+   sp, _ = newScrapePool(cfg, app, 0, nil, false, false, nil)
    t1 = &Target{
        discoveredLabels: labels.Labels{
            labels.Label{
@@ -2694,6 +2774,9 @@ func TestScrapeAddFast(t *testing.T) {
        0,
        0,
        false,
+       nil,
+       nil,
+       false,
    )
    defer cancel()
@@ -2723,7 +2806,7 @@ func TestReuseCacheRace(t *testing.T) {
        ScrapeInterval: model.Duration(5 * time.Second),
        MetricsPath:    "/metrics",
    }
-   sp, _ = newScrapePool(cfg, app, 0, nil, false, nil)
+   sp, _ = newScrapePool(cfg, app, 0, nil, false, false, nil)
    t1 = &Target{
        discoveredLabels: labels.Labels{
            labels.Label{
@@ -2782,6 +2865,9 @@ func TestScrapeReportSingleAppender(t *testing.T) {
        10*time.Millisecond,
        time.Hour,
        false,
+       nil,
+       nil,
+       false,
    )
    numScrapes := 0
@@ -2852,7 +2938,7 @@ func TestScrapeReportLimit(t *testing.T) {
    }))
    defer ts.Close()
-   sp, err := newScrapePool(cfg, s, 0, nil, false, nil)
+   sp, err := newScrapePool(cfg, s, 0, nil, false, false, nil)
    require.NoError(t, err)
    defer sp.stop()
@@ -2981,6 +3067,9 @@ func TestScrapeLoopLabelLimit(t *testing.T) {
        0,
        0,
        false,
+       nil,
+       nil,
+       false,
    )
    slApp := sl.appender(context.Background())
@@ -3019,7 +3108,7 @@ func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) {
        },
        },
    }
-   sp, _ := newScrapePool(config, &nopAppendable{}, 0, nil, false, nil)
+   sp, _ := newScrapePool(config, &nopAppendable{}, 0, nil, false, false, nil)
    tgts := []*targetgroup.Group{
        {
            Targets: []model.LabelSet{{model.AddressLabel: "127.0.0.1:9090"}},
View file
@@ -17,10 +17,10 @@ import (
    "crypto/tls"
    "crypto/x509"
    "fmt"
-   "io/ioutil"
    "net/http"
    "net/http/httptest"
    "net/url"
+   "os"
    "strings"
    "testing"
    "time"
@@ -337,7 +337,7 @@ func TestNewHTTPWithBadServerName(t *testing.T) {
func newTLSConfig(certName string, t *testing.T) *tls.Config {
    tlsConfig := &tls.Config{}
    caCertPool := x509.NewCertPool()
-   caCert, err := ioutil.ReadFile(caCertPath)
+   caCert, err := os.ReadFile(caCertPath)
    if err != nil {
        t.Fatalf("Couldn't set up TLS server: %v", err)
    }
View file
@@ -1,4 +1,4 @@
-// Don't flag lines such as "io.Copy(ioutil.Discard, resp.Body)".
+// Don't flag lines such as "io.Copy(io.Discard, resp.Body)".
io.Copy
// The next two are used in HTTP handlers, any error is handled by the server itself.
io.WriteString

10
scripts/package_assets.sh Executable file

@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# compress static assets
set -euo pipefail
version="$(< VERSION)"
mkdir -p .tarballs
cd web/ui
find static -type f -not -name '*.gz' -print0 | xargs -0 tar czf ../../.tarballs/prometheus-web-ui-${version}.tar.gz


@ -19,7 +19,6 @@ import (
"context" "context"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"strconv" "strconv"
"strings" "strings"
@ -213,7 +212,7 @@ func (c *Client) Store(ctx context.Context, req []byte) error {
return RecoverableError{err, defaultBackoff} return RecoverableError{err, defaultBackoff}
} }
defer func() { defer func() {
io.Copy(ioutil.Discard, httpResp.Body) io.Copy(io.Discard, httpResp.Body)
httpResp.Body.Close() httpResp.Body.Close()
}() }()
@ -300,13 +299,13 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe
return nil, errors.Wrap(err, "error sending request") return nil, errors.Wrap(err, "error sending request")
} }
defer func() { defer func() {
io.Copy(ioutil.Discard, httpResp.Body) io.Copy(io.Discard, httpResp.Body)
httpResp.Body.Close() httpResp.Body.Close()
}() }()
c.readQueriesDuration.Observe(time.Since(start).Seconds()) c.readQueriesDuration.Observe(time.Since(start).Seconds())
c.readQueriesTotal.WithLabelValues(strconv.Itoa(httpResp.StatusCode)).Inc() c.readQueriesTotal.WithLabelValues(strconv.Itoa(httpResp.StatusCode)).Inc()
compressed, err = ioutil.ReadAll(httpResp.Body) compressed, err = io.ReadAll(httpResp.Body)
if err != nil { if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("error reading response. HTTP status code: %s", httpResp.Status)) return nil, errors.Wrap(err, fmt.Sprintf("error reading response. HTTP status code: %s", httpResp.Status))
} }
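Both remote-write and remote-read responses are drained with io.Copy(io.Discard, ...) before Body.Close(); leaving unread bytes behind can prevent the HTTP Transport from reusing the underlying connection. A minimal sketch of the drain-then-close pattern (the URL and wrapper function are illustrative, not the client's actual API):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

// fetch reads a response body and then drains whatever is left so the
// Transport can return the TCP connection to its idle pool.
func fetch(url string) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer func() {
		io.Copy(io.Discard, resp.Body) // drain any unread bytes
		resp.Body.Close()
	}()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading response. HTTP status code: %s: %w", resp.Status, err)
	}
	return body, nil
}

func main() {
	if _, err := fetch("http://127.0.0.1:9090/metrics"); err != nil { // hypothetical endpoint
		fmt.Println(err)
	}
}
```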


@ -16,7 +16,6 @@ package remote
import ( import (
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"sort" "sort"
"strings" "strings"
@ -53,7 +52,7 @@ func (e HTTPError) Status() int {
// DecodeReadRequest reads a remote.Request from a http.Request. // DecodeReadRequest reads a remote.Request from a http.Request.
func DecodeReadRequest(r *http.Request) (*prompb.ReadRequest, error) { func DecodeReadRequest(r *http.Request) (*prompb.ReadRequest, error) {
compressed, err := ioutil.ReadAll(io.LimitReader(r.Body, decodeReadLimit)) compressed, err := io.ReadAll(io.LimitReader(r.Body, decodeReadLimit))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -554,7 +553,7 @@ func metricTypeToMetricTypeProto(t textparse.MetricType) prompb.MetricMetadata_M
// DecodeWriteRequest from an io.Reader into a prompb.WriteRequest, handling // DecodeWriteRequest from an io.Reader into a prompb.WriteRequest, handling
// snappy decompression. // snappy decompression.
func DecodeWriteRequest(r io.Reader) (*prompb.WriteRequest, error) { func DecodeWriteRequest(r io.Reader) (*prompb.WriteRequest, error) {
compressed, err := ioutil.ReadAll(r) compressed, err := io.ReadAll(r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
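DecodeReadRequest caps the body with io.LimitReader before reading it, so an oversized request cannot exhaust memory. A minimal sketch of the cap-then-read idiom; note that LimitReader truncates silently once the limit is hit (the tiny limit here is illustrative):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

const decodeLimit = 16 // bytes; deliberately tiny for the demo

func main() {
	body := strings.NewReader("this payload is longer than the limit")

	// Read at most decodeLimit bytes; anything beyond is never read.
	data, err := io.ReadAll(io.LimitReader(body, decodeLimit))
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("read %d bytes: %q\n", len(data), data)
}
```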


@ -1116,21 +1116,35 @@ func (q *queue) ReturnForReuse(batch []sampleOrExemplar) {
// FlushAndShutdown stops the queue and flushes any samples. No appends can be // FlushAndShutdown stops the queue and flushes any samples. No appends can be
// made after this is called. // made after this is called.
func (q *queue) FlushAndShutdown(done <-chan struct{}) { func (q *queue) FlushAndShutdown(done <-chan struct{}) {
q.batchMtx.Lock() for q.tryEnqueueingBatch(done) {
defer q.batchMtx.Unlock() time.Sleep(time.Second)
if len(q.batch) > 0 {
select {
case q.batchQueue <- q.batch:
case <-done:
// The shard has been hard shut down, so no more samples can be
// sent. Drop everything left in the queue.
}
} }
q.batch = nil q.batch = nil
close(q.batchQueue) close(q.batchQueue)
} }
// tryEnqueueingBatch tries to send a batch if necessary. If sending needs to
// be retried, it will return true.
func (q *queue) tryEnqueueingBatch(done <-chan struct{}) bool {
q.batchMtx.Lock()
defer q.batchMtx.Unlock()
if len(q.batch) == 0 {
return false
}
select {
case q.batchQueue <- q.batch:
return false
case <-done:
// The shard has been hard shut down, so no more samples can be sent.
// No need to try again as we will drop everything left in the queue.
return false
default:
// The batchQueue is full, so we need to try again later.
return true
}
}
func (q *queue) newBatch(capacity int) []sampleOrExemplar { func (q *queue) newBatch(capacity int) []sampleOrExemplar {
q.poolMtx.Lock() q.poolMtx.Lock()
defer q.poolMtx.Unlock() defer q.poolMtx.Unlock()
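The rewritten FlushAndShutdown no longer blocks on batchQueue while holding batchMtx; tryEnqueueingBatch makes a single non-blocking attempt (via select with a default case) and the caller retries after releasing the mutex, which is what breaks the deadlock against Batch(). A standalone sketch of that retry pattern, with illustrative names:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// trySend mirrors tryEnqueueingBatch: it returns true when the channel is
// full and the caller should retry after releasing the lock.
func trySend(mtx *sync.Mutex, ch chan int, v int, done <-chan struct{}) bool {
	mtx.Lock()
	defer mtx.Unlock()
	select {
	case ch <- v:
		return false // sent
	case <-done:
		return false // hard shutdown: drop the value, no retry needed
	default:
		return true // channel full: retry later, without holding the lock
	}
}

func main() {
	var mtx sync.Mutex
	ch := make(chan int, 1)
	done := make(chan struct{})
	ch <- 0 // fill the channel so the first attempt must retry
	for trySend(&mtx, ch, 42, done) {
		<-ch // a consumer frees capacity between retries
		time.Sleep(time.Millisecond)
	}
	fmt.Println("enqueued:", <-ch) // prints "enqueued: 42"
}
```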


@ -16,7 +16,6 @@ package remote
import ( import (
"context" "context"
"fmt" "fmt"
"io/ioutil"
"math" "math"
"net/url" "net/url"
"os" "os"
@ -824,7 +823,7 @@ func BenchmarkStartup(b *testing.B) {
// Find the second largest segment; we will replay up to this. // Find the second largest segment; we will replay up to this.
// (Second largest as WALWatcher will start tailing the largest). // (Second largest as WALWatcher will start tailing the largest).
dirents, err := ioutil.ReadDir(dir) dirents, err := os.ReadDir(dir)
require.NoError(b, err) require.NoError(b, err)
var segments []int var segments []int
@ -1183,3 +1182,29 @@ func TestQueueManagerMetrics(t *testing.T) {
err = client_testutil.GatherAndCompare(reg, strings.NewReader("")) err = client_testutil.GatherAndCompare(reg, strings.NewReader(""))
require.NoError(t, err) require.NoError(t, err)
} }
func TestQueue_FlushAndShutdownDoesNotDeadlock(t *testing.T) {
capacity := 100
batchSize := 10
queue := newQueue(batchSize, capacity)
for i := 0; i < capacity+batchSize; i++ {
queue.Append(sampleOrExemplar{})
}
done := make(chan struct{})
go queue.FlushAndShutdown(done)
go func() {
// Give enough time for FlushAndShutdown to acquire the lock. queue.Batch()
// should not block forever even if the lock is acquired.
time.Sleep(10 * time.Millisecond)
queue.Batch()
close(done)
}()
select {
case <-done:
case <-time.After(2 * time.Second):
t.Error("Deadlock in FlushAndShutdown detected")
pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
t.FailNow()
}
}
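The regression test protects itself with a timeout plus a goroutine dump, so a reintroduced deadlock fails quickly with actionable output instead of hanging CI. A minimal reusable sketch of that guard (package and test names are hypothetical):

```go
package queue

import (
	"os"
	"runtime/pprof"
	"testing"
	"time"
)

// waitOrDump fails the test with a goroutine dump when done does not close
// within timeout -- the same guard the deadlock test above relies on.
func waitOrDump(t *testing.T, done <-chan struct{}, timeout time.Duration) {
	t.Helper()
	select {
	case <-done:
	case <-time.After(timeout):
		t.Error("possible deadlock detected")
		pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
		t.FailNow()
	}
}

func TestShutdownCompletes(t *testing.T) {
	done := make(chan struct{})
	go close(done) // stands in for FlushAndShutdown finishing
	waitOrDump(t, done, 2*time.Second)
}
```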


@ -16,7 +16,6 @@ package remote
import ( import (
"bytes" "bytes"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"testing" "testing"
@ -84,7 +83,7 @@ func TestSampledReadEndpoint(t *testing.T) {
require.Equal(t, "snappy", recorder.Result().Header.Get("Content-Encoding")) require.Equal(t, "snappy", recorder.Result().Header.Get("Content-Encoding"))
// Decode the response. // Decode the response.
compressed, err = ioutil.ReadAll(recorder.Result().Body) compressed, err = io.ReadAll(recorder.Result().Body)
require.NoError(t, err) require.NoError(t, err)
uncompressed, err := snappy.Decode(nil, compressed) uncompressed, err := snappy.Decode(nil, compressed)


@ -17,7 +17,7 @@ import (
"bytes" "bytes"
"context" "context"
"fmt" "fmt"
"io/ioutil" "io"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"testing" "testing"
@ -129,7 +129,7 @@ func TestCommitErr(t *testing.T) {
handler.ServeHTTP(recorder, req) handler.ServeHTTP(recorder, req)
resp := recorder.Result() resp := recorder.Result()
body, err := ioutil.ReadAll(resp.Body) body, err := io.ReadAll(resp.Body)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, http.StatusInternalServerError, resp.StatusCode) require.Equal(t, http.StatusInternalServerError, resp.StatusCode)
require.Equal(t, "commit error\n", string(body)) require.Equal(t, "commit error\n", string(body))


@ -196,6 +196,21 @@ func NewTemplateExpander(
} }
return host return host
}, },
"stripDomain": func(hostPort string) string {
host, port, err := net.SplitHostPort(hostPort)
if err != nil {
host = hostPort
}
ip := net.ParseIP(host)
if ip != nil {
return hostPort
}
host = strings.Split(host, ".")[0]
if port != "" {
return net.JoinHostPort(host, port)
}
return host
},
"humanize": func(i interface{}) (string, error) { "humanize": func(i interface{}) (string, error) {
v, err := convertToFloat(i) v, err := convertToFloat(i)
if err != nil { if err != nil {
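The new stripDomain function keeps only the first DNS label of a hostname (plus any port) and deliberately leaves IP addresses untouched, since net.ParseIP succeeds for them. A sketch of driving the same logic from a plain text/template; the standalone FuncMap registration is illustrative, as in Prometheus it happens inside NewTemplateExpander:

```go
package main

import (
	"net"
	"os"
	"strings"
	"text/template"
)

// stripDomain mirrors the template function added above.
func stripDomain(hostPort string) string {
	host, port, err := net.SplitHostPort(hostPort)
	if err != nil {
		host = hostPort
	}
	if ip := net.ParseIP(host); ip != nil {
		return hostPort // IP addresses pass through unchanged
	}
	host = strings.Split(host, ".")[0]
	if port != "" {
		return net.JoinHostPort(host, port)
	}
	return host
}

func main() {
	tmpl := template.Must(template.New("t").
		Funcs(template.FuncMap{"stripDomain": stripDomain}).
		Parse(`{{ "foo.example.com:12345" | stripDomain }}` + "\n"))
	tmpl.Execute(os.Stdout, nil) // prints "foo:12345"
}
```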


@ -480,6 +480,41 @@ func TestTemplateExpansion(t *testing.T) {
text: "{{ printf \"%0.2f\" (parseDuration \"1h2m10ms\") }}", text: "{{ printf \"%0.2f\" (parseDuration \"1h2m10ms\") }}",
output: "3720.01", output: "3720.01",
}, },
{
// Simple hostname.
text: "{{ \"foo.example.com\" | stripDomain }}",
output: "foo",
},
{
// Hostname with port.
text: "{{ \"foo.example.com:12345\" | stripDomain }}",
output: "foo:12345",
},
{
// Simple IPv4 address.
text: "{{ \"192.0.2.1\" | stripDomain }}",
output: "192.0.2.1",
},
{
// IPv4 address with port.
text: "{{ \"192.0.2.1:12345\" | stripDomain }}",
output: "192.0.2.1:12345",
},
{
// Simple IPv6 address.
text: "{{ \"2001:0DB8::1\" | stripDomain }}",
output: "2001:0DB8::1",
},
{
// IPv6 address with port.
text: "{{ \"[2001:0DB8::1]:12345\" | stripDomain }}",
output: "[2001:0DB8::1]:12345",
},
{
// Value can't be split into host and port.
text: "{{ \"[2001:0DB8::1]::12345\" | stripDomain }}",
output: "[2001:0DB8::1]::12345",
},
}) })
} }


@ -183,6 +183,14 @@ func getClient(tracingCfg config.TracingConfig) (otlptrace.Client, error) {
opts := []otlptracegrpc.Option{otlptracegrpc.WithEndpoint(tracingCfg.Endpoint)} opts := []otlptracegrpc.Option{otlptracegrpc.WithEndpoint(tracingCfg.Endpoint)}
if tracingCfg.Insecure { if tracingCfg.Insecure {
opts = append(opts, otlptracegrpc.WithInsecure()) opts = append(opts, otlptracegrpc.WithInsecure())
} else {
// Use of TLS Credentials forces the use of TLS. Therefore it can
// only be set when `insecure` is set to false.
tlsConf, err := config_util.NewTLSConfig(&tracingCfg.TLSConfig)
if err != nil {
return nil, err
}
opts = append(opts, otlptracegrpc.WithTLSCredentials(credentials.NewTLS(tlsConf)))
} }
if tracingCfg.Compression != "" { if tracingCfg.Compression != "" {
opts = append(opts, otlptracegrpc.WithCompressor(tracingCfg.Compression)) opts = append(opts, otlptracegrpc.WithCompressor(tracingCfg.Compression))
@ -194,12 +202,6 @@ func getClient(tracingCfg config.TracingConfig) (otlptrace.Client, error) {
opts = append(opts, otlptracegrpc.WithTimeout(time.Duration(tracingCfg.Timeout))) opts = append(opts, otlptracegrpc.WithTimeout(time.Duration(tracingCfg.Timeout)))
} }
tlsConf, err := config_util.NewTLSConfig(&tracingCfg.TLSConfig)
if err != nil {
return nil, err
}
opts = append(opts, otlptracegrpc.WithTLSCredentials(credentials.NewTLS(tlsConf)))
client = otlptracegrpc.NewClient(opts...) client = otlptracegrpc.NewClient(opts...)
case config.TracingClientHTTP: case config.TracingClientHTTP:
opts := []otlptracehttp.Option{otlptracehttp.WithEndpoint(tracingCfg.Endpoint)} opts := []otlptracehttp.Option{otlptracehttp.WithEndpoint(tracingCfg.Endpoint)}
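Before this fix the gRPC tracing client always appended WithTLSCredentials, which conflicts with WithInsecure; TLS credentials force TLS, so they may only be set on the secure path. A condensed sketch of the corrected option assembly (the endpoint value is hypothetical):

```go
package main

import (
	"crypto/tls"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	"google.golang.org/grpc/credentials"
)

// buildOpts adds TLS credentials only when the connection is not insecure,
// mirroring the branch structure of the fix above.
func buildOpts(endpoint string, insecure bool, tlsConf *tls.Config) []otlptracegrpc.Option {
	opts := []otlptracegrpc.Option{otlptracegrpc.WithEndpoint(endpoint)}
	if insecure {
		opts = append(opts, otlptracegrpc.WithInsecure())
	} else {
		opts = append(opts, otlptracegrpc.WithTLSCredentials(credentials.NewTLS(tlsConf)))
	}
	return opts
}

func main() {
	_ = buildOpts("collector:4317", false, &tls.Config{MinVersion: tls.VersionTLS12})
}
```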


@ -393,7 +393,7 @@ func (db *DB) replayWAL() error {
func (db *DB) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef) (err error) { func (db *DB) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef) (err error) {
var ( var (
dec record.Decoder dec record.Decoder
lastRef chunks.HeadSeriesRef lastRef = chunks.HeadSeriesRef(db.nextRef.Load())
decoded = make(chan interface{}, 10) decoded = make(chan interface{}, 10)
errCh = make(chan error, 1) errCh = make(chan error, 1)
@ -411,6 +411,7 @@ func (db *DB) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.He
go func() { go func() {
defer close(decoded) defer close(decoded)
var err error
for r.Next() { for r.Next() {
rec := r.Record() rec := r.Record()
switch dec.Type(rec) { switch dec.Type(rec) {
@ -440,6 +441,8 @@ func (db *DB) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.He
decoded <- samples decoded <- samples
case record.Tombstones, record.Exemplars: case record.Tombstones, record.Exemplars:
// We don't care about tombstones or exemplars during replay. // We don't care about tombstones or exemplars during replay.
// TODO: If we decide to decode exemplars, we should make sure to prepopulate
// stripeSeries.exemplars in the next block by using setLatestExemplar.
continue continue
default: default:
errCh <- &wal.CorruptionErr{ errCh <- &wal.CorruptionErr{
@ -789,6 +792,16 @@ func (a *appender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exem
} }
} }
// Check for duplicate vs last stored exemplar for this series, and discard those.
// Otherwise, record the current exemplar as the latest.
// Prometheus' TSDB returns 0 when encountering duplicates, so we do the same here.
prevExemplar := a.series.GetLatestExemplar(s.ref)
if prevExemplar != nil && prevExemplar.Equals(e) {
// Duplicate, don't return an error but don't accept the exemplar.
return 0, nil
}
a.series.SetLatestExemplar(s.ref, &e)
a.pendingExamplars = append(a.pendingExamplars, record.RefExemplar{ a.pendingExamplars = append(a.pendingExamplars, record.RefExemplar{
Ref: s.ref, Ref: s.ref,
T: e.Ts, T: e.Ts,
@ -796,6 +809,7 @@ func (a *appender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exem
Labels: e.Labels, Labels: e.Labels,
}) })
a.metrics.totalAppendedExemplars.Inc()
return storage.SeriesRef(s.ref), nil return storage.SeriesRef(s.ref), nil
} }
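AppendExemplar now compares each incoming exemplar against the last one stored for the series and accepts duplicates silently with a zero ref, matching TSDB behaviour. A minimal sketch of that guard built on the same exemplar.Equals comparison; the closure stands in for the appender/stripeSeries pair:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	var last *exemplar.Exemplar // the per-series "latest exemplar" slot

	appendExemplar := func(e exemplar.Exemplar) bool {
		if last != nil && last.Equals(e) {
			return false // duplicate: accept silently, write nothing
		}
		cp := e
		last = &cp
		return true // genuinely new: would be recorded to the WAL
	}

	e := exemplar.Exemplar{Labels: labels.Labels{{Name: "a", Value: "1"}}, Value: 20, Ts: 10, HasTs: true}
	fmt.Println(appendExemplar(e)) // true
	fmt.Println(appendExemplar(e)) // false, identical to the last one
	e.Value = 42
	fmt.Println(appendExemplar(e)) // true, the value changed
}
```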


@ -15,6 +15,7 @@ package agent
import ( import (
"context" "context"
"fmt"
"path/filepath" "path/filepath"
"strconv" "strconv"
"testing" "testing"
@ -24,6 +25,7 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/exemplar"
@ -127,7 +129,7 @@ func TestCommit(t *testing.T) {
e := exemplar.Exemplar{ e := exemplar.Exemplar{
Labels: lset, Labels: lset,
Ts: sample[0].T(), Ts: sample[0].T() + int64(i),
Value: sample[0].V(), Value: sample[0].V(),
HasTs: true, HasTs: true,
} }
@ -409,6 +411,41 @@ func TestLockfile(t *testing.T) {
}) })
} }
func Test_ExistingWAL_NextRef(t *testing.T) {
dbDir := t.TempDir()
rs := remote.NewStorage(log.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil)
defer func() {
require.NoError(t, rs.Close())
}()
db, err := Open(log.NewNopLogger(), nil, rs, dbDir, DefaultOptions())
require.NoError(t, err)
seriesCount := 10
// Append <seriesCount> series
app := db.Appender(context.Background())
for i := 0; i < seriesCount; i++ {
lset := labels.Labels{
{Name: model.MetricNameLabel, Value: fmt.Sprintf("series_%d", i)},
}
_, err := app.Append(0, lset, 0, 100)
require.NoError(t, err)
}
require.NoError(t, app.Commit())
// Truncate the WAL to force creation of a new segment.
require.NoError(t, db.truncate(0))
require.NoError(t, db.Close())
// Create a new storage and see what nextRef is initialized to.
db, err = Open(log.NewNopLogger(), nil, rs, dbDir, DefaultOptions())
require.NoError(t, err)
defer require.NoError(t, db.Close())
require.Equal(t, uint64(seriesCount), db.nextRef.Load(), "nextRef should be equal to the number of series written across the entire WAL")
}
func startTime() (int64, error) { func startTime() (int64, error) {
return time.Now().Unix() * 1000, nil return time.Now().Unix() * 1000, nil
} }
@ -445,3 +482,56 @@ func gatherFamily(t *testing.T, reg prometheus.Gatherer, familyName string) *dto
return nil return nil
} }
func TestStorage_DuplicateExemplarsIgnored(t *testing.T) {
s := createTestAgentDB(t, nil, DefaultOptions())
app := s.Appender(context.Background())
defer s.Close()
sRef, err := app.Append(0, labels.Labels{{Name: "a", Value: "1"}}, 0, 0)
require.NoError(t, err, "should not reject valid series")
// Write a few exemplars to our appender and call Commit().
// If the Labels, Value, or Timestamp differ from the last exemplar,
// a new one should be appended; otherwise, it should be skipped.
e := exemplar.Exemplar{Labels: labels.Labels{{Name: "a", Value: "1"}}, Value: 20, Ts: 10, HasTs: true}
_, _ = app.AppendExemplar(sRef, nil, e)
_, _ = app.AppendExemplar(sRef, nil, e)
e.Labels = labels.Labels{{Name: "b", Value: "2"}}
_, _ = app.AppendExemplar(sRef, nil, e)
_, _ = app.AppendExemplar(sRef, nil, e)
_, _ = app.AppendExemplar(sRef, nil, e)
e.Value = 42
_, _ = app.AppendExemplar(sRef, nil, e)
_, _ = app.AppendExemplar(sRef, nil, e)
e.Ts = 25
_, _ = app.AppendExemplar(sRef, nil, e)
_, _ = app.AppendExemplar(sRef, nil, e)
require.NoError(t, app.Commit())
// Read back what was written to the WAL.
var walExemplarsCount int
sr, err := wal.NewSegmentsReader(s.wal.Dir())
require.NoError(t, err)
defer sr.Close()
r := wal.NewReader(sr)
var dec record.Decoder
for r.Next() {
rec := r.Record()
switch dec.Type(rec) {
case record.Exemplars:
var exemplars []record.RefExemplar
exemplars, err = dec.Exemplars(rec, exemplars)
require.NoError(t, err)
walExemplarsCount += len(exemplars)
}
}
// We had 9 calls to AppendExemplar but only 4 of those should have gotten through.
require.Equal(t, 4, walExemplarsCount)
}


@ -16,6 +16,7 @@ package agent
import ( import (
"sync" "sync"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/chunks"
) )
@ -89,10 +90,11 @@ func (m seriesHashmap) Delete(hash uint64, ref chunks.HeadSeriesRef) {
// Filling the padded space with the maps was profiled to be slower - // Filling the padded space with the maps was profiled to be slower -
// likely due to the additional pointer dereferences. // likely due to the additional pointer dereferences.
type stripeSeries struct { type stripeSeries struct {
size int size int
series []map[chunks.HeadSeriesRef]*memSeries series []map[chunks.HeadSeriesRef]*memSeries
hashes []seriesHashmap hashes []seriesHashmap
locks []stripeLock exemplars []map[chunks.HeadSeriesRef]*exemplar.Exemplar
locks []stripeLock
gcMut sync.Mutex gcMut sync.Mutex
} }
@ -105,10 +107,11 @@ type stripeLock struct {
func newStripeSeries(stripeSize int) *stripeSeries { func newStripeSeries(stripeSize int) *stripeSeries {
s := &stripeSeries{ s := &stripeSeries{
size: stripeSize, size: stripeSize,
series: make([]map[chunks.HeadSeriesRef]*memSeries, stripeSize), series: make([]map[chunks.HeadSeriesRef]*memSeries, stripeSize),
hashes: make([]seriesHashmap, stripeSize), hashes: make([]seriesHashmap, stripeSize),
locks: make([]stripeLock, stripeSize), exemplars: make([]map[chunks.HeadSeriesRef]*exemplar.Exemplar, stripeSize),
locks: make([]stripeLock, stripeSize),
} }
for i := range s.series { for i := range s.series {
s.series[i] = map[chunks.HeadSeriesRef]*memSeries{} s.series[i] = map[chunks.HeadSeriesRef]*memSeries{}
@ -116,6 +119,9 @@ func newStripeSeries(stripeSize int) *stripeSeries {
for i := range s.hashes { for i := range s.hashes {
s.hashes[i] = seriesHashmap{} s.hashes[i] = seriesHashmap{}
} }
for i := range s.exemplars {
s.exemplars[i] = map[chunks.HeadSeriesRef]*exemplar.Exemplar{}
}
return s return s
} }
@ -154,6 +160,10 @@ func (s *stripeSeries) GC(mint int64) map[chunks.HeadSeriesRef]struct{} {
delete(s.series[refLock], series.ref) delete(s.series[refLock], series.ref)
s.hashes[hashLock].Delete(hash, series.ref) s.hashes[hashLock].Delete(hash, series.ref)
// Since the series is gone, we'll also delete
// the latest stored exemplar.
delete(s.exemplars[refLock], series.ref)
if hashLock != refLock { if hashLock != refLock {
s.locks[refLock].Unlock() s.locks[refLock].Unlock()
} }
@ -201,3 +211,24 @@ func (s *stripeSeries) Set(hash uint64, series *memSeries) {
s.hashes[hashLock].Set(hash, series) s.hashes[hashLock].Set(hash, series)
s.locks[hashLock].Unlock() s.locks[hashLock].Unlock()
} }
func (s *stripeSeries) GetLatestExemplar(ref chunks.HeadSeriesRef) *exemplar.Exemplar {
i := uint64(ref) & uint64(s.size-1)
s.locks[i].RLock()
exemplar := s.exemplars[i][ref]
s.locks[i].RUnlock()
return exemplar
}
func (s *stripeSeries) SetLatestExemplar(ref chunks.HeadSeriesRef, exemplar *exemplar.Exemplar) {
i := uint64(ref) & uint64(s.size-1)
// Make sure this is a valid series ID before recording its latest exemplar.
s.locks[i].Lock()
if s.series[i][ref] != nil {
s.exemplars[i][ref] = exemplar
}
s.locks[i].Unlock()
}
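Both exemplar accessors pick their stripe with ref & (size-1), a cheap modulo that is only correct because the stripe count is a power of two. A tiny sketch of the masking; the size matches tsdb's default stripe count but is an assumption here:

```go
package main

import "fmt"

func main() {
	const stripeSize = 16384 // must be a power of two for the mask to work
	for _, ref := range []uint64{1, 16384, 16385, 999999} {
		// Same computation as GetLatestExemplar/SetLatestExemplar above.
		i := ref & uint64(stripeSize-1)
		fmt.Printf("ref %d -> stripe %d\n", ref, i)
	}
}
```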


@ -17,7 +17,6 @@ package tsdb
import ( import (
"encoding/json" "encoding/json"
"io" "io"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"sort" "sort"
@ -201,7 +200,7 @@ const (
func chunkDir(dir string) string { return filepath.Join(dir, "chunks") } func chunkDir(dir string) string { return filepath.Join(dir, "chunks") }
func readMetaFile(dir string) (*BlockMeta, int64, error) { func readMetaFile(dir string) (*BlockMeta, int64, error) {
b, err := ioutil.ReadFile(filepath.Join(dir, metaFilename)) b, err := os.ReadFile(filepath.Join(dir, metaFilename))
if err != nil { if err != nil {
return nil, 0, err return nil, 0, err
} }
@ -636,7 +635,7 @@ func (pb *Block) Snapshot(dir string) error {
// Hardlink the chunks // Hardlink the chunks
curChunkDir := chunkDir(pb.dir) curChunkDir := chunkDir(pb.dir)
files, err := ioutil.ReadDir(curChunkDir) files, err := os.ReadDir(curChunkDir)
if err != nil { if err != nil {
return errors.Wrap(err, "ReadDir the current chunk dir") return errors.Wrap(err, "ReadDir the current chunk dir")
} }


@ -15,7 +15,6 @@ package tsdb
import ( import (
"context" "context"
"io/ioutil"
"math" "math"
"os" "os"
@ -64,7 +63,7 @@ func NewBlockWriter(logger log.Logger, dir string, blockSize int64) (*BlockWrite
// initHead creates and initialises a new TSDB head. // initHead creates and initialises a new TSDB head.
func (w *BlockWriter) initHead() error { func (w *BlockWriter) initHead() error {
chunkDir, err := ioutil.TempDir(os.TempDir(), "head") chunkDir, err := os.MkdirTemp(os.TempDir(), "head")
if err != nil { if err != nil {
return errors.Wrap(err, "create temp dir") return errors.Wrap(err, "create temp dir")
} }


@ -21,7 +21,6 @@ import (
"hash" "hash"
"hash/crc32" "hash/crc32"
"io" "io"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"strconv" "strconv"
@ -591,7 +590,7 @@ func (s *Reader) Chunk(ref ChunkRef) (chunkenc.Chunk, error) {
} }
func nextSequenceFile(dir string) (string, int, error) { func nextSequenceFile(dir string) (string, int, error) {
files, err := ioutil.ReadDir(dir) files, err := os.ReadDir(dir)
if err != nil { if err != nil {
return "", 0, err return "", 0, err
} }
@ -617,7 +616,7 @@ func segmentFile(baseDir string, index int) string {
} }
func sequenceFiles(dir string) ([]string, error) { func sequenceFiles(dir string) ([]string, error) {
files, err := ioutil.ReadDir(dir) files, err := os.ReadDir(dir)
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -19,7 +19,6 @@ import (
"encoding/binary" "encoding/binary"
"hash" "hash"
"io" "io"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"sort" "sort"
@ -330,7 +329,7 @@ func (cdm *ChunkDiskMapper) openMMapFiles() (returnErr error) {
} }
func listChunkFiles(dir string) (map[int]string, error) { func listChunkFiles(dir string) (map[int]string, error) {
files, err := ioutil.ReadDir(dir) files, err := os.ReadDir(dir)
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -16,7 +16,6 @@ package chunks
import ( import (
"encoding/binary" "encoding/binary"
"errors" "errors"
"io/ioutil"
"math/rand" "math/rand"
"os" "os"
"strconv" "strconv"
@ -124,7 +123,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
require.Equal(t, 3, len(hrw.mmappedChunkFiles), "expected 3 mmapped files, got %d", len(hrw.mmappedChunkFiles)) require.Equal(t, 3, len(hrw.mmappedChunkFiles), "expected 3 mmapped files, got %d", len(hrw.mmappedChunkFiles))
require.Equal(t, len(hrw.mmappedChunkFiles), len(hrw.closers)) require.Equal(t, len(hrw.mmappedChunkFiles), len(hrw.closers))
actualBytes, err := ioutil.ReadFile(firstFileName) actualBytes, err := os.ReadFile(firstFileName)
require.NoError(t, err) require.NoError(t, err)
// Check header of the segment file. // Check header of the segment file.
@ -202,7 +201,7 @@ func TestChunkDiskMapper_Truncate(t *testing.T) {
verifyFiles := func(remainingFiles []int) { verifyFiles := func(remainingFiles []int) {
t.Helper() t.Helper()
files, err := ioutil.ReadDir(hrw.dir.Name()) files, err := os.ReadDir(hrw.dir.Name())
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, len(remainingFiles), len(files), "files on disk") require.Equal(t, len(remainingFiles), len(files), "files on disk")
require.Equal(t, len(remainingFiles), len(hrw.mmappedChunkFiles), "hrw.mmappedChunkFiles") require.Equal(t, len(remainingFiles), len(hrw.mmappedChunkFiles), "hrw.mmappedChunkFiles")
@ -320,7 +319,7 @@ func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) {
verifyFiles := func(remainingFiles []int) { verifyFiles := func(remainingFiles []int) {
t.Helper() t.Helper()
files, err := ioutil.ReadDir(hrw.dir.Name()) files, err := os.ReadDir(hrw.dir.Name())
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, len(remainingFiles), len(files), "files on disk") require.Equal(t, len(remainingFiles), len(files), "files on disk")
require.Equal(t, len(remainingFiles), len(hrw.mmappedChunkFiles), "hrw.mmappedChunkFiles") require.Equal(t, len(remainingFiles), len(hrw.mmappedChunkFiles), "hrw.mmappedChunkFiles")
@ -454,7 +453,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
} }
// Removed even from disk. // Removed even from disk.
files, err := ioutil.ReadDir(dir) files, err := os.ReadDir(dir)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, 3, len(files)) require.Equal(t, 3, len(files))
for _, fi := range files { for _, fi := range files {


@ -18,7 +18,7 @@ import (
"context" "context"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/fs"
"math" "math"
"os" "os"
"path/filepath" "path/filepath"
@ -761,20 +761,20 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
} }
func removeBestEffortTmpDirs(l log.Logger, dir string) error { func removeBestEffortTmpDirs(l log.Logger, dir string) error {
files, err := ioutil.ReadDir(dir) files, err := os.ReadDir(dir)
if os.IsNotExist(err) { if os.IsNotExist(err) {
return nil return nil
} }
if err != nil { if err != nil {
return err return err
} }
for _, fi := range files { for _, f := range files {
if isTmpDir(fi) { if isTmpDir(f) {
if err := os.RemoveAll(filepath.Join(dir, fi.Name())); err != nil { if err := os.RemoveAll(filepath.Join(dir, f.Name())); err != nil {
level.Error(l).Log("msg", "failed to delete tmp block dir", "dir", filepath.Join(dir, fi.Name()), "err", err) level.Error(l).Log("msg", "failed to delete tmp block dir", "dir", filepath.Join(dir, f.Name()), "err", err)
continue continue
} }
level.Info(l).Log("msg", "Found and deleted tmp block dir", "dir", filepath.Join(dir, fi.Name())) level.Info(l).Log("msg", "Found and deleted tmp block dir", "dir", filepath.Join(dir, f.Name()))
} }
} }
return nil return nil
@ -1717,7 +1717,7 @@ func (db *DB) CleanTombstones() (err error) {
return nil return nil
} }
func isBlockDir(fi os.FileInfo) bool { func isBlockDir(fi fs.DirEntry) bool {
if !fi.IsDir() { if !fi.IsDir() {
return false return false
} }
@ -1726,7 +1726,7 @@ func isBlockDir(fi os.FileInfo) bool {
} }
// isTmpDir returns true if the given file-info contains a block ULID or checkpoint prefix and a tmp extension. // isTmpDir returns true if the given file-info contains a block ULID or checkpoint prefix and a tmp extension.
func isTmpDir(fi os.FileInfo) bool { func isTmpDir(fi fs.DirEntry) bool {
if !fi.IsDir() { if !fi.IsDir() {
return false return false
} }
@ -1745,22 +1745,22 @@ func isTmpDir(fi os.FileInfo) bool {
} }
func blockDirs(dir string) ([]string, error) { func blockDirs(dir string) ([]string, error) {
files, err := ioutil.ReadDir(dir) files, err := os.ReadDir(dir)
if err != nil { if err != nil {
return nil, err return nil, err
} }
var dirs []string var dirs []string
for _, fi := range files { for _, f := range files {
if isBlockDir(fi) { if isBlockDir(f) {
dirs = append(dirs, filepath.Join(dir, fi.Name())) dirs = append(dirs, filepath.Join(dir, f.Name()))
} }
} }
return dirs, nil return dirs, nil
} }
func sequenceFiles(dir string) ([]string, error) { func sequenceFiles(dir string) ([]string, error) {
files, err := ioutil.ReadDir(dir) files, err := os.ReadDir(dir)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1776,7 +1776,7 @@ func sequenceFiles(dir string) ([]string, error) {
} }
func nextSequenceFile(dir string) (string, int, error) { func nextSequenceFile(dir string) (string, int, error) {
files, err := ioutil.ReadDir(dir) files, err := os.ReadDir(dir)
if err != nil { if err != nil {
return "", 0, err return "", 0, err
} }


@ -20,7 +20,6 @@ import (
"flag" "flag"
"fmt" "fmt"
"hash/crc32" "hash/crc32"
"io/ioutil"
"math" "math"
"math/rand" "math/rand"
"os" "os"
@ -226,7 +225,7 @@ func TestNoPanicAfterWALCorruption(t *testing.T) {
// it is not garbage collected. // it is not garbage collected.
// The repair deletes all WAL records after the corrupted record and these are read from the mmaped chunk. // The repair deletes all WAL records after the corrupted record and these are read from the mmaped chunk.
{ {
walFiles, err := ioutil.ReadDir(path.Join(db.Dir(), "wal")) walFiles, err := os.ReadDir(path.Join(db.Dir(), "wal"))
require.NoError(t, err) require.NoError(t, err)
f, err := os.OpenFile(path.Join(db.Dir(), "wal", walFiles[0].Name()), os.O_RDWR, 0o666) f, err := os.OpenFile(path.Join(db.Dir(), "wal", walFiles[0].Name()), os.O_RDWR, 0o666)
require.NoError(t, err) require.NoError(t, err)
@ -927,12 +926,14 @@ func TestWALSegmentSizeOptions(t *testing.T) {
tests := map[int]func(dbdir string, segmentSize int){ tests := map[int]func(dbdir string, segmentSize int){
// Default Wal Size. // Default Wal Size.
0: func(dbDir string, segmentSize int) { 0: func(dbDir string, segmentSize int) {
filesAndDir, err := ioutil.ReadDir(filepath.Join(dbDir, "wal")) filesAndDir, err := os.ReadDir(filepath.Join(dbDir, "wal"))
require.NoError(t, err) require.NoError(t, err)
files := []os.FileInfo{} files := []os.FileInfo{}
for _, f := range filesAndDir { for _, f := range filesAndDir {
if !f.IsDir() { if !f.IsDir() {
files = append(files, f) fi, err := f.Info()
require.NoError(t, err)
files = append(files, fi)
} }
} }
// All the full segment files (all but the last) should match the segment size option. // All the full segment files (all but the last) should match the segment size option.
@ -944,12 +945,14 @@ func TestWALSegmentSizeOptions(t *testing.T) {
}, },
// Custom Wal Size. // Custom Wal Size.
2 * 32 * 1024: func(dbDir string, segmentSize int) { 2 * 32 * 1024: func(dbDir string, segmentSize int) {
filesAndDir, err := ioutil.ReadDir(filepath.Join(dbDir, "wal")) filesAndDir, err := os.ReadDir(filepath.Join(dbDir, "wal"))
require.NoError(t, err) require.NoError(t, err)
files := []os.FileInfo{} files := []os.FileInfo{}
for _, f := range filesAndDir { for _, f := range filesAndDir {
if !f.IsDir() { if !f.IsDir() {
files = append(files, f) fi, err := f.Info()
require.NoError(t, err)
files = append(files, fi)
} }
} }
require.Greater(t, len(files), 1, "current WALSegmentSize should result in more than a single WAL file.") require.Greater(t, len(files), 1, "current WALSegmentSize should result in more than a single WAL file.")
@ -2707,7 +2710,7 @@ func TestChunkWriter_ReadAfterWrite(t *testing.T) {
} }
require.NoError(t, chunkw.Close()) require.NoError(t, chunkw.Close())
files, err := ioutil.ReadDir(tempDir) files, err := os.ReadDir(tempDir)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, test.expSegmentsCount, len(files), "expected segments count mismatch") require.Equal(t, test.expSegmentsCount, len(files), "expected segments count mismatch")
@ -2727,7 +2730,9 @@ func TestChunkWriter_ReadAfterWrite(t *testing.T) {
sizeExp += test.expSegmentsCount * chunks.SegmentHeaderSize // The segment header bytes. sizeExp += test.expSegmentsCount * chunks.SegmentHeaderSize // The segment header bytes.
for i, f := range files { for i, f := range files {
size := int(f.Size()) fi, err := f.Info()
require.NoError(t, err)
size := int(fi.Size())
// Verify that the segment is the same or smaller than the expected size. // Verify that the segment is the same or smaller than the expected size.
require.GreaterOrEqual(t, chunks.SegmentHeaderSize+test.expSegmentSizes[i], size, "Segment:%v should NOT be bigger than:%v actual:%v", i, chunks.SegmentHeaderSize+test.expSegmentSizes[i], size) require.GreaterOrEqual(t, chunks.SegmentHeaderSize+test.expSegmentSizes[i], size, "Segment:%v should NOT be bigger than:%v actual:%v", i, chunks.SegmentHeaderSize+test.expSegmentSizes[i], size)
@ -2878,7 +2883,7 @@ func TestCompactHead(t *testing.T) {
} }
func deleteNonBlocks(dbDir string) error { func deleteNonBlocks(dbDir string) error {
dirs, err := ioutil.ReadDir(dbDir) dirs, err := os.ReadDir(dbDir)
if err != nil { if err != nil {
return err return err
} }
@ -2889,7 +2894,7 @@ func deleteNonBlocks(dbDir string) error {
} }
} }
} }
dirs, err = ioutil.ReadDir(dbDir) dirs, err = os.ReadDir(dbDir)
if err != nil { if err != nil {
return err return err
} }
@ -2996,7 +3001,7 @@ func TestOpen_VariousBlockStates(t *testing.T) {
require.Equal(t, len(expectedLoadedDirs), loaded) require.Equal(t, len(expectedLoadedDirs), loaded)
require.NoError(t, db.Close()) require.NoError(t, db.Close())
files, err := ioutil.ReadDir(tmpDir) files, err := os.ReadDir(tmpDir)
require.NoError(t, err) require.NoError(t, err)
var ignored int var ignored int
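os.ReadDir returns fs.DirEntry values, which carry only a name and a type; size and other metadata now require an explicit Info() call, which is exactly the adjustment the WAL segment-size tests above make. A minimal sketch of the migrated pattern:

```go
package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	entries, err := os.ReadDir(".") // []fs.DirEntry, not []os.FileInfo
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		fi, err := e.Info() // fetch full metadata only when it is needed
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s: %d bytes\n", e.Name(), fi.Size())
	}
}
```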

Some files were not shown because too many files have changed in this diff.