Merge branch 'main' into nhcb

Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
Jeanette Tan 2024-05-08 19:11:39 +08:00
commit 796b1bbfde
69 changed files with 1885 additions and 970 deletions


@@ -12,7 +12,7 @@ jobs:
name: lint
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: bufbuild/buf-setup-action@517ee23296d5caf38df31c21945e6a54bbc8a89f # v1.30.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}


@@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
if: github.repository_owner == 'prometheus'
steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: bufbuild/buf-setup-action@517ee23296d5caf38df31c21945e6a54bbc8a89f # v1.30.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}


@@ -13,7 +13,7 @@ jobs:
# should also be updated.
image: quay.io/prometheus/golang-builder:1.22-base
steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/setup_environment
- run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1
@@ -27,7 +27,7 @@ jobs:
container:
image: quay.io/prometheus/golang-builder:1.22-base
steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/setup_environment
- run: go test --tags=dedupelabels ./...
@@ -43,7 +43,7 @@ jobs:
# The go version in this image should be N-1 wrt test_go.
image: quay.io/prometheus/golang-builder:1.21-base
steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- run: make build
# Don't run NPM build; don't run race-detector.
- run: make test GO_ONLY=1 test-flags=""
@@ -57,7 +57,7 @@ jobs:
image: quay.io/prometheus/golang-builder:1.22-base
steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/setup_environment
with:
@@ -74,7 +74,7 @@ jobs:
name: Go tests on Windows
runs-on: windows-latest
steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: 1.22.x
@@ -91,7 +91,7 @@ jobs:
container:
image: quay.io/prometheus/golang-builder:1.22-base
steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- run: go install ./cmd/promtool/.
- run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest
- run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest
@@ -114,7 +114,7 @@ jobs:
matrix:
thread: [ 0, 1, 2 ]
steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/build
with:
@@ -137,18 +137,31 @@ jobs:
# Whenever the Go version is updated here, .promu.yml
# should also be updated.
steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/build
with:
parallelism: 12
thread: ${{ matrix.thread }}
+check_generated_parser:
+name: Check generated parser
+runs-on: ubuntu-latest
+steps:
+- name: Checkout repository
+uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+- name: Install Go
+uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
+with:
+cache: false
+go-version: 1.22.x
+- name: Run goyacc and check for diff
+run: make install-goyacc check-generated-parser
golangci:
name: golangci-lint
runs-on: ubuntu-latest
steps:
- name: Checkout repository
-uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- name: Install Go
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
@@ -175,7 +188,7 @@ jobs:
needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/publish_main
with:
@@ -189,7 +202,7 @@ jobs:
needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/publish_release
with:
@@ -204,7 +217,7 @@ jobs:
needs: [test_ui, codeql]
steps:
- name: Checkout
-uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- name: Install nodejs
uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2


@@ -24,7 +24,7 @@ jobs:
steps:
- name: Checkout repository
-uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- name: Initialize CodeQL
uses: github/codeql-action/init@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12


@@ -17,7 +17,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
-uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- name: Set docker hub repo name
run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
- name: Push README to Dockerhub
@@ -37,7 +37,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
-uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- name: Set quay.io org name
run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
- name: Set quay.io repo name


@@ -21,7 +21,7 @@ jobs:
fuzz-seconds: 600
dry-run: false
- name: Upload Crash
-uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
+uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: failure() && steps.build.outcome == 'success'
with:
name: artifacts


@@ -13,7 +13,7 @@ jobs:
container:
image: quay.io/prometheus/golang-builder
steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- run: ./scripts/sync_repo_files.sh
env:
GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}


@@ -21,7 +21,7 @@ jobs:
steps:
- name: "Checkout code"
-uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # tag=v4.1.2
+uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # tag=v4.1.4
with:
persist-credentials: false
@@ -37,7 +37,7 @@ jobs:
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
-uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # tag=v4.3.1
+uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # tag=v4.3.3
with:
name: SARIF file
path: results.sarif


@@ -2,7 +2,44 @@
## unreleased
+* [CHANGE] Rules: Execute 1 query instead of N (where N is the number of alerts within alert rule) when restoring alerts. #13980
+* [ENHANCEMENT] Rules: Add `rule_group_last_restore_duration_seconds` to measure the time it takes to restore a rule group. #13974
+* [ENHANCEMENT] OTLP: Improve remote write format translation performance by using label set hashes for metric identifiers instead of string based ones. #14006 #13991
+* [BUGFIX] OTLP: Don't generate target_info unless at least one identifying label is defined. #13991
+* [BUGFIX] OTLP: Don't generate target_info unless there are metrics. #13991
+## 2.52.0-rc.1 / 2024-05-03
+* [BUGFIX] API: Fix missing comma during JSON encoding of API results. #14047
+## 2.52.0-rc.0 / 2024-04-22
* [CHANGE] TSDB: Fix the predicate checking for blocks which are beyond the retention period to include the ones right at the retention boundary. #9633
+* [FEATURE] Kubernetes SD: Add a new metric `prometheus_sd_kubernetes_failures_total` to track failed requests to Kubernetes API. #13554
+* [FEATURE] Kubernetes SD: Add node and zone metadata labels when using the endpointslice role. #13935
+* [FEATURE] Azure SD/Remote Write: Allow usage of Azure authorization SDK. #13099
+* [FEATURE] Alerting: Support native histogram templating. #13731
+* [FEATURE] Linode SD: Support IPv6 range discovery and region filtering. #13774
+* [ENHANCEMENT] PromQL: Performance improvements for queries with regex matchers. #13461
+* [ENHANCEMENT] PromQL: Performance improvements when using aggregation operators. #13744
+* [ENHANCEMENT] PromQL: Validate label_join destination label. #13803
+* [ENHANCEMENT] Scrape: Increment `prometheus_target_scrapes_sample_duplicate_timestamp_total` metric on duplicated series during one scrape. #12933
+* [ENHANCEMENT] TSDB: Many improvements in performance. #13742 #13673 #13782
+* [ENHANCEMENT] TSDB: Pause regular block compactions if the head needs to be compacted (prioritize head as it increases memory consumption). #13754
+* [ENHANCEMENT] Observability: Improved logging during signal handling termination. #13772
+* [ENHANCEMENT] Observability: All log lines for drop series use "num_dropped" key consistently. #13823
+* [ENHANCEMENT] Observability: Log chunk snapshot and mmaped chunk replay duration during WAL replay. #13838
+* [ENHANCEMENT] Observability: Log if the block is being created from WBL during compaction. #13846
+* [BUGFIX] PromQL: Fix inaccurate sample number statistic when querying histograms. #13667
+* [BUGFIX] PromQL: Fix `histogram_stddev` and `histogram_stdvar` for cases where the histogram has negative buckets. #13852
+* [BUGFIX] PromQL: Fix possible duplicated label name and values in a metric result for specific queries. #13845
+* [BUGFIX] Scrape: Fix setting native histogram schema factor during scrape. #13846
+* [BUGFIX] TSDB: Fix counting of histogram samples when creating WAL checkpoint stats. #13776
+* [BUGFIX] TSDB: Fix cases of compacting empty heads. #13755
+* [BUGFIX] TSDB: Count float histograms in WAL checkpoint. #13844
+* [BUGFIX] Remote Read: Fix memory leak due to broken requests. #13777
+* [BUGFIX] API: Stop building response for `/api/v1/series/` when the API request was cancelled. #13766
+* [BUGFIX] promtool: Fix panic on `promtool tsdb analyze --extended` when no native histograms are present. #13976
## 2.51.2 / 2024-04-09


@@ -24,6 +24,7 @@ TSDB_BENCHMARK_DATASET ?= ./tsdb/testdata/20kseries.json
TSDB_BENCHMARK_OUTPUT_DIR ?= ./benchout
GOLANGCI_LINT_OPTS ?= --timeout 4m
+GOYACC_VERSION ?= v0.6.0
include Makefile.common
@@ -78,24 +79,42 @@ assets-tarball: assets
@echo '>> packaging assets'
scripts/package_assets.sh
+# We only want to generate the parser when there's changes to the grammar.
.PHONY: parser
parser:
@echo ">> running goyacc to generate the .go file."
ifeq (, $(shell command -v goyacc 2> /dev/null))
@echo "goyacc not installed so skipping"
-@echo "To install: go install golang.org/x/tools/cmd/goyacc@v0.6.0"
+@echo "To install: \"go install golang.org/x/tools/cmd/goyacc@$(GOYACC_VERSION)\" or run \"make install-goyacc\""
else
-goyacc -l -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y
+$(MAKE) promql/parser/generated_parser.y.go
endif
+promql/parser/generated_parser.y.go: promql/parser/generated_parser.y
+@echo ">> running goyacc to generate the .go file."
+@goyacc -l -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y
+.PHONY: clean-parser
+clean-parser:
+@echo ">> cleaning generated parser"
+@rm -f promql/parser/generated_parser.y.go
+.PHONY: check-generated-parser
+check-generated-parser: clean-parser promql/parser/generated_parser.y.go
+@echo ">> checking generated parser"
+@git diff --exit-code -- promql/parser/generated_parser.y.go || (echo "Generated parser is out of date. Please run 'make parser' and commit the changes." && false)
+.PHONY: install-goyacc
+install-goyacc:
+@echo ">> installing goyacc $(GOYACC_VERSION)"
+@go install golang.org/x/tools/cmd/goyacc@$(GOYACC_VERSION)
.PHONY: test
# If we only want to only test go code we have to change the test target
# which is called by all.
ifeq ($(GO_ONLY),1)
test: common-test check-go-mod-version
else
-test: common-test ui-build-module ui-test ui-lint check-go-mod-version
+test: check-generated-parser common-test ui-build-module ui-test ui-lint check-go-mod-version
endif
.PHONY: npm_licenses


@@ -55,7 +55,7 @@ ifneq ($(shell command -v gotestsum 2> /dev/null),)
endif
endif
-PROMU_VERSION ?= 0.15.0
+PROMU_VERSION ?= 0.17.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
SKIP_GOLANGCI_LINT :=


@@ -1 +1 @@
-2.51.2
+2.52.0-rc.1


@@ -217,6 +217,7 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
level.Info(logger).Log("msg", "Experimental PromQL functions enabled.")
case "native-histograms":
c.tsdb.EnableNativeHistograms = true
+c.scrape.EnableNativeHistogramsIngestion = true
// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols


@@ -19,16 +19,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
-var (
-clientGoRequestMetrics = &clientGoRequestMetricAdapter{}
-clientGoWorkloadMetrics = &clientGoWorkqueueMetricsProvider{}
-)
-func init() {
-clientGoRequestMetrics.RegisterWithK8sGoClient()
-clientGoWorkloadMetrics.RegisterWithK8sGoClient()
-}
// Metrics to be used with a discovery manager.
type Metrics struct {
FailedConfigs prometheus.Gauge


@@ -35,6 +35,11 @@ const (
workqueueMetricsNamespace = KubernetesMetricsNamespace + "_workqueue"
)
+var (
+clientGoRequestMetrics = &clientGoRequestMetricAdapter{}
+clientGoWorkloadMetrics = &clientGoWorkqueueMetricsProvider{}
+)
var (
// Metrics for client-go's HTTP requests.
clientGoRequestResultMetricVec = prometheus.NewCounterVec(
@@ -135,6 +140,9 @@ func clientGoMetrics() []prometheus.Collector {
}
func RegisterK8sClientMetricsWithPrometheus(registerer prometheus.Registerer) error {
+clientGoRequestMetrics.RegisterWithK8sGoClient()
+clientGoWorkloadMetrics.RegisterWithK8sGoClient()
for _, collector := range clientGoMetrics() {
err := registerer.Register(collector)
if err != nil {


@@ -174,20 +174,25 @@ func (d *instanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
labels[instanceTagsLabel] = model.LabelValue(tags)
}
+addr := ""
if server.IPv6 != nil {
labels[instancePublicIPv6Label] = model.LabelValue(server.IPv6.Address.String())
+addr = server.IPv6.Address.String()
}
if server.PublicIP != nil {
labels[instancePublicIPv4Label] = model.LabelValue(server.PublicIP.Address.String())
+addr = server.PublicIP.Address.String()
}
if server.PrivateIP != nil {
labels[instancePrivateIPv4Label] = model.LabelValue(*server.PrivateIP)
+addr = *server.PrivateIP
+}
-addr := net.JoinHostPort(*server.PrivateIP, strconv.FormatUint(uint64(d.port), 10))
+if addr != "" {
+addr := net.JoinHostPort(addr, strconv.FormatUint(uint64(d.port), 10))
labels[model.AddressLabel] = model.LabelValue(addr)
targets = append(targets, labels)
}
}
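The net effect of the change above is a last-assignment-wins fallback: the address defaults to empty, may be overwritten by the public IPv6, then the public IPv4, then the private IPv4, and the target is skipped entirely if none is set. The following standalone Go sketch (hypothetical, simplified string fields instead of the SDK's pointer types; not the actual discovery code) illustrates the order that the documentation change later in this commit describes (private IPv4, then public IPv4, then public IPv6):

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

// server is a stand-in for the Scaleway SDK server type; empty strings
// play the role of nil pointers in the real code.
type server struct {
	IPv6, PublicIP, PrivateIP string
}

// pickAddr mirrors the diff: later assignments win, so the effective
// preference is private IPv4, then public IPv4, then public IPv6.
func pickAddr(s server, port int) (string, bool) {
	addr := ""
	if s.IPv6 != "" {
		addr = s.IPv6
	}
	if s.PublicIP != "" {
		addr = s.PublicIP
	}
	if s.PrivateIP != "" {
		addr = s.PrivateIP
	}
	if addr == "" {
		return "", false // no address at all: the target is not added
	}
	return net.JoinHostPort(addr, strconv.Itoa(port)), true
}

func main() {
	fmt.Println(pickAddr(server{IPv6: "2001:db8::1", PublicIP: "51.158.183.115"}, 80))
	fmt.Println(pickAddr(server{}, 80))
}
```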


@@ -60,7 +60,7 @@ api_url: %s
tg := tgs[0]
require.NotNil(t, tg)
require.NotNil(t, tg.Targets)
-require.Len(t, tg.Targets, 2)
+require.Len(t, tg.Targets, 3)
for i, lbls := range []model.LabelSet{
{
@@ -110,6 +110,28 @@ api_url: %s
"__meta_scaleway_instance_type": "DEV1-S",
"__meta_scaleway_instance_zone": "fr-par-1",
},
+{
+"__address__": "51.158.183.115:80",
+"__meta_scaleway_instance_boot_type": "local",
+"__meta_scaleway_instance_hostname": "routed-dualstack",
+"__meta_scaleway_instance_id": "4904366a-7e26-4b65-b97b-6392c761247a",
+"__meta_scaleway_instance_image_arch": "x86_64",
+"__meta_scaleway_instance_image_id": "3e0a5b84-1d69-4993-8fa4-0d7df52d5160",
+"__meta_scaleway_instance_image_name": "Ubuntu 22.04 Jammy Jellyfish",
+"__meta_scaleway_instance_location_cluster_id": "19",
+"__meta_scaleway_instance_location_hypervisor_id": "1201",
+"__meta_scaleway_instance_location_node_id": "24",
+"__meta_scaleway_instance_name": "routed-dualstack",
+"__meta_scaleway_instance_organization_id": "20b3d507-96ac-454c-a795-bc731b46b12f",
+"__meta_scaleway_instance_project_id": "20b3d507-96ac-454c-a795-bc731b46b12f",
+"__meta_scaleway_instance_public_ipv4": "51.158.183.115",
+"__meta_scaleway_instance_region": "nl-ams",
+"__meta_scaleway_instance_security_group_id": "984414da-9fc2-49c0-a925-fed6266fe092",
+"__meta_scaleway_instance_security_group_name": "Default security group",
+"__meta_scaleway_instance_status": "running",
+"__meta_scaleway_instance_type": "DEV1-S",
+"__meta_scaleway_instance_zone": "nl-ams-1",
+},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
require.Equal(t, lbls, tg.Targets[i])


@@ -216,6 +216,146 @@
"placement_group": null,
"private_nics": [],
"zone": "fr-par-1"
},
{
"id": "4904366a-7e26-4b65-b97b-6392c761247a",
"name": "routed-dualstack",
"arch": "x86_64",
"commercial_type": "DEV1-S",
"boot_type": "local",
"organization": "20b3d507-96ac-454c-a795-bc731b46b12f",
"project": "20b3d507-96ac-454c-a795-bc731b46b12f",
"hostname": "routed-dualstack",
"image": {
"id": "3e0a5b84-1d69-4993-8fa4-0d7df52d5160",
"name": "Ubuntu 22.04 Jammy Jellyfish",
"organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
"project": "51b656e3-4865-41e8-adbc-0c45bdd780db",
"root_volume": {
"id": "13d945b9-5e78-4f9d-8ac4-c4bc2fa7c31a",
"name": "Ubuntu 22.04 Jammy Jellyfish",
"volume_type": "unified",
"size": 10000000000
},
"extra_volumes": {},
"public": true,
"arch": "x86_64",
"creation_date": "2024-02-22T15:52:56.037007+00:00",
"modification_date": "2024-02-22T15:52:56.037007+00:00",
"default_bootscript": null,
"from_server": null,
"state": "available",
"tags": [],
"zone": "nl-ams-1"
},
"volumes": {
"0": {
"boot": false,
"id": "fe85c817-e67e-4e24-8f13-bde3e9f42620",
"name": "Ubuntu 22.04 Jammy Jellyfish",
"volume_type": "l_ssd",
"export_uri": null,
"organization": "20b3d507-96ac-454c-a795-bc731b46b12f",
"project": "20b3d507-96ac-454c-a795-bc731b46b12f",
"server": {
"id": "4904366a-7e26-4b65-b97b-6392c761247a",
"name": "routed-dualstack"
},
"size": 20000000000,
"state": "available",
"creation_date": "2024-04-19T14:50:14.019739+00:00",
"modification_date": "2024-04-19T14:50:14.019739+00:00",
"tags": [],
"zone": "nl-ams-1"
}
},
"tags": [],
"state": "running",
"protected": false,
"state_detail": "booted",
"public_ip": {
"id": "53f8f861-7a11-4b16-a4bc-fb8f4b4a11d0",
"address": "51.158.183.115",
"dynamic": false,
"gateway": "62.210.0.1",
"netmask": "32",
"family": "inet",
"provisioning_mode": "dhcp",
"tags": [],
"state": "attached",
"ipam_id": "ec3499ff-a664-49b7-818a-9fe4b95aee5e"
},
"public_ips": [
{
"id": "53f8f861-7a11-4b16-a4bc-fb8f4b4a11d0",
"address": "51.158.183.115",
"dynamic": false,
"gateway": "62.210.0.1",
"netmask": "32",
"family": "inet",
"provisioning_mode": "dhcp",
"tags": [],
"state": "attached",
"ipam_id": "ec3499ff-a664-49b7-818a-9fe4b95aee5e"
},
{
"id": "f52a8c81-0875-4aee-b96e-eccfc6bec367",
"address": "2001:bc8:1640:1568:dc00:ff:fe21:91b",
"dynamic": false,
"gateway": "fe80::dc00:ff:fe21:91c",
"netmask": "64",
"family": "inet6",
"provisioning_mode": "slaac",
"tags": [],
"state": "attached",
"ipam_id": "40d1e6ea-e932-42f9-8acb-55398bec7ad6"
}
],
"mac_address": "de:00:00:21:09:1b",
"routed_ip_enabled": true,
"ipv6": null,
"extra_networks": [],
"dynamic_ip_required": false,
"enable_ipv6": false,
"private_ip": null,
"creation_date": "2024-04-19T14:50:14.019739+00:00",
"modification_date": "2024-04-19T14:52:21.181670+00:00",
"bootscript": {
"id": "5a520dda-96d6-4ed2-acd1-1d526b6859fe",
"public": true,
"title": "x86_64 mainline 4.4.182 rev1",
"architecture": "x86_64",
"organization": "11111111-1111-4111-8111-111111111111",
"project": "11111111-1111-4111-8111-111111111111",
"kernel": "http://10.196.2.9/kernel/x86_64-mainline-lts-4.4-4.4.182-rev1/vmlinuz-4.4.182",
"dtb": "",
"initrd": "http://10.196.2.9/initrd/initrd-Linux-x86_64-v3.14.6.gz",
"bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
"default": true,
"zone": "nl-ams-1"
},
"security_group": {
"id": "984414da-9fc2-49c0-a925-fed6266fe092",
"name": "Default security group"
},
"location": {
"zone_id": "ams1",
"platform_id": "23",
"cluster_id": "19",
"hypervisor_id": "1201",
"node_id": "24"
},
"maintenances": [],
"allowed_actions": [
"poweroff",
"terminate",
"reboot",
"stop_in_place",
"backup"
],
"placement_group": null,
"private_nics": [],
"zone": "nl-ams-1"
}
]
}


@@ -2952,9 +2952,10 @@ The following meta labels are available on targets during [relabeling](#relabel_
* `__meta_scaleway_instance_type`: commercial type of the server
* `__meta_scaleway_instance_zone`: the zone of the server (ex: `fr-par-1`, complete list [here](https://developers.scaleway.com/en/products/instance/api/#introduction))
-This role uses the private IPv4 address by default. This can be
+This role uses the first address it finds in the following order: private IPv4, public IPv4, public IPv6. This can be
changed with relabeling, as demonstrated in [the Prometheus scaleway-sd
configuration file](/documentation/examples/prometheus-scaleway.yml).
+Should an instance have no address before relabeling, it will not be added to the target list and you will not be able to relabel it.
#### Baremetal role


@@ -5,63 +5,7 @@
# Remote Read API
-This is not currently considered part of the stable API and is subject to change
-even between non-major version releases of Prometheus.
+> This is not currently considered part of the stable API and is subject to change even between non-major version releases of Prometheus.
-## Format overview
-The API response format is JSON. Every successful API request returns a `2xx`
-status code.
-Invalid requests that reach the API handlers return a JSON error object
-and one of the following HTTP response codes:
-- `400 Bad Request` when parameters are missing or incorrect.
-- `422 Unprocessable Entity` when an expression can't be executed
-([RFC4918](https://tools.ietf.org/html/rfc4918#page-78)).
-- `503 Service Unavailable` when queries time out or abort.
-Other non-`2xx` codes may be returned for errors occurring before the API
-endpoint is reached.
-An array of warnings may be returned if there are errors that do
-not inhibit the request execution. All of the data that was successfully
-collected will be returned in the data field.
-The JSON response envelope format is as follows:
-```
-{
-"status": "success" | "error",
-"data": <data>,
-// Only set if status is "error". The data field may still hold
-// additional data.
-"errorType": "<string>",
-"error": "<string>",
-// Only if there were warnings while executing the request.
-// There will still be data in the data field.
-"warnings": ["<string>"]
-}
-```
-Generic placeholders are defined as follows:
-* `<rfc3339 | unix_timestamp>`: Input timestamps may be provided either in
-[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format or as a Unix timestamp
-in seconds, with optional decimal places for sub-second precision. Output
-timestamps are always represented as Unix timestamps in seconds.
-* `<series_selector>`: Prometheus [time series
-selectors](basics.md#time-series-selectors) like `http_requests_total` or
-`http_requests_total{method=~"(GET|POST)"}` and need to be URL-encoded.
-* `<duration>`: [Prometheus duration strings](basics.md#time_durations).
-For example, `5m` refers to a duration of 5 minutes.
-* `<bool>`: boolean values (strings `true` and `false`).
-Note: Names of query parameters that may be repeated end with `[]`.
-## Remote Read API
This API provides data read functionality from Prometheus. This interface expects [snappy](https://github.com/google/snappy) compression.
The API definition is located [here](https://github.com/prometheus/prometheus/blob/master/prompb/remote.proto).
@@ -79,5 +23,3 @@ This returns a message that includes a list of raw samples.
These streamed chunks utilize an XOR algorithm inspired by the [Gorilla](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf)
compression to encode the chunks. However, it provides resolution to the millisecond instead of to the second.


@@ -84,8 +84,10 @@ or 31 days, whichever is smaller.
Prometheus has several flags that configure local storage. The most important are:
- `--storage.tsdb.path`: Where Prometheus writes its database. Defaults to `data/`.
-- `--storage.tsdb.retention.time`: When to remove old data. Defaults to `15d`.
-Overrides `storage.tsdb.retention` if this flag is set to anything other than default.
+- `--storage.tsdb.retention.time`: How long to retain samples in storage. When this flag is
+set, it overrides `storage.tsdb.retention`. If neither this flag nor `storage.tsdb.retention`
+nor `storage.tsdb.retention.size` is set, the retention time defaults to `15d`.
+Supported units: y, w, d, h, m, s, ms.
- `--storage.tsdb.retention.size`: The maximum number of bytes of storage blocks to retain.
The oldest data will be removed first. Defaults to `0` or disabled. Units supported:
B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Only


@@ -10,7 +10,7 @@ require (
github.com/influxdata/influxdb v1.11.5
github.com/prometheus/client_golang v1.19.0
github.com/prometheus/common v0.53.0
-github.com/prometheus/prometheus v0.51.1
+github.com/prometheus/prometheus v0.51.2
github.com/stretchr/testify v1.9.0
)


@@ -279,8 +279,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
-github.com/prometheus/prometheus v0.51.1 h1:V2e7x2oiUC0Megp26+xjffxBf9EGkyP1iQuGd4VjUSU=
+github.com/prometheus/prometheus v0.51.2 h1:U0faf1nT4CB9DkBW87XLJCBi2s8nwWXdTbyzRUAkX0w=
-github.com/prometheus/prometheus v0.51.1/go.mod h1:yv4MwOn3yHMQ6MZGHPg/U7Fcyqf+rxqiZfSur6myVtc=
+github.com/prometheus/prometheus v0.51.2/go.mod h1:yv4MwOn3yHMQ6MZGHPg/U7Fcyqf+rxqiZfSur6myVtc=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 h1:/8rfZAdFfafRXOgz+ZpMZZWZ5pYggCY9t7e/BvjaBHM=

go.mod

@@ -41,7 +41,7 @@ require (
github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.17.8
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
-github.com/linode/linodego v1.32.0
+github.com/linode/linodego v1.33.0
github.com/miekg/dns v1.1.59
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
@@ -80,10 +80,10 @@ require (
golang.org/x/sys v0.19.0
golang.org/x/time v0.5.0
golang.org/x/tools v0.20.0
-google.golang.org/api v0.174.0
+google.golang.org/api v0.177.0
google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be
google.golang.org/grpc v1.63.2
-google.golang.org/protobuf v1.33.0
+google.golang.org/protobuf v1.34.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.29.3
@@ -94,8 +94,8 @@
)
require (
-cloud.google.com/go/auth v0.2.0 // indirect
+cloud.google.com/go/auth v0.3.0 // indirect
-cloud.google.com/go/auth/oauth2adapt v0.2.0 // indirect
+cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
cloud.google.com/go/compute/metadata v0.3.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
@@ -191,7 +191,7 @@ require (
golang.org/x/mod v0.17.0 // indirect
golang.org/x/term v0.19.0 // indirect
golang.org/x/text v0.14.0 // indirect
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be // indirect
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gotest.tools/v3 v3.0.3 // indirect

go.sum

@@ -12,10 +12,10 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go/auth v0.2.0 h1:y6oTcpMSbOcXbwYgUUrvI+mrQ2xbrcdpPgtVbCGTLTk=
+cloud.google.com/go/auth v0.3.0 h1:PRyzEpGfx/Z9e8+lHsbkoUVXD0gnu4MNmm7Gp8TQNIs=
-cloud.google.com/go/auth v0.2.0/go.mod h1:+yb+oy3/P0geX6DLKlqiGHARGR6EX2GRtYCzWOCQSbU=
+cloud.google.com/go/auth v0.3.0/go.mod h1:lBv6NKTWp8E3LPzmO1TbiiRKc4drLOfHsgmlH9ogv5w=
-cloud.google.com/go/auth/oauth2adapt v0.2.0 h1:FR8zevgQwu+8CqiOT5r6xCmJa3pJC/wdXEEPF1OkNhA=
+cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
-cloud.google.com/go/auth/oauth2adapt v0.2.0/go.mod h1:AfqujpDAlTfLfeCIl/HJZZlIxD8+nJoZ5e0x1IxGq5k=
+cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -471,8 +471,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
-github.com/linode/linodego v1.32.0 h1:OmZzB3iON6uu84VtLFf64uKmAQqJJarvmsVguroioPI=
+github.com/linode/linodego v1.33.0 h1:cX2FYry7r6CA1ujBMsdqiM4VhvIQtnWsOuVblzfBhCw=
-github.com/linode/linodego v1.32.0/go.mod h1:y8GDP9uLVH4jTB9qyrgw79qfKdYJmNCGUOJmfuiOcmI=
+github.com/linode/linodego v1.33.0/go.mod h1:dSJJgIwqZCF5wnpuC6w5cyIbRtcexAm7uVvuJopGB40=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@@ -1045,8 +1045,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.174.0 h1:zB1BWl7ocxfTea2aQ9mgdzXjnfPySllpPOskdnO+q34=
+google.golang.org/api v0.177.0 h1:8a0p/BbPa65GlqGWtUKxot4p0TV8OGOfyTjtmkXNXmk=
-google.golang.org/api v0.174.0/go.mod h1:aC7tB6j0HR1Nl0ni5ghpx6iLasmAX78Zkh/wgxAAjLg=
+google.golang.org/api v0.177.0/go.mod h1:srbhue4MLjkjbkux5p3dw/ocYOSZTaIEvf7bCOnFQDw=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1085,8 +1085,8 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be h1:Zz7rLWqp0ApfsR/l7+zSHhY3PMiH2xqgxlfYfAfNpoU=
google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be/go.mod h1:dvdCTIoAGbkWbcIKBniID56/7XHTt6WfxXNMxuziJ+w=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be h1:LG9vZxsWGOmUKieR8wPAUR3u3MpnYFQZROPIMaXh7/A=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6 h1:DujSIu+2tC9Ht0aPNA7jgj23Iq8Ewi5sgkQ++wdvonE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@@ -1118,8 +1118,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=


@@ -16,6 +16,7 @@ package labels
import (
"slices"
"strings"
+"unicode/utf8"
"github.com/grafana/regexp"
"github.com/grafana/regexp/syntax"
@@ -827,8 +828,7 @@ type zeroOrOneCharacterStringMatcher struct {
}
func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool {
-// Zero or one.
-if len(s) > 1 {
+if moreThanOneRune(s) {
return false
}
@@ -840,6 +840,27 @@ func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool {
return s[0] != '\n'
}
+// moreThanOneRune returns true if there are more than one runes in the string.
+// It doesn't check whether the string is valid UTF-8.
+// The return value should be always equal to utf8.RuneCountInString(s) > 1,
+// but the function is optimized for the common case where the string prefix is ASCII.
+func moreThanOneRune(s string) bool {
+// If len(s) is exactly one or zero, there can't be more than one rune.
+// Exit through this path quickly.
+if len(s) <= 1 {
+return false
+}
+// There's one or more bytes:
+// If first byte is ASCII then there are multiple runes if there are more bytes after that.
+if s[0] < utf8.RuneSelf {
+return len(s) > 1
+}
+// Less common case: first is a multibyte rune.
+return utf8.RuneCountInString(s) > 1
+}
// trueMatcher is a stringMatcher which matches any string (always returns true).
type trueMatcher struct{}
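As a quick sanity check of the equivalence claimed in the new comment (this snippet is an assumed, standalone example, not part of the commit), the program below compares moreThanOneRune, copied verbatim from the diff, against utf8.RuneCountInString on a few inputs, including the multibyte value added to the regexp tests:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// moreThanOneRune is copied from the diff above so the example is self-contained.
func moreThanOneRune(s string) bool {
	if len(s) <= 1 {
		return false
	}
	if s[0] < utf8.RuneSelf {
		return len(s) > 1
	}
	return utf8.RuneCountInString(s) > 1
}

func main() {
	for _, s := range []string{"", "a", "ab", "é", "éa", "😀", "😀foo0"} {
		got := moreThanOneRune(s)
		want := utf8.RuneCountInString(s) > 1 // the reference the comment mentions
		fmt.Printf("%-10q got=%v want=%v\n", s, got, want)
	}
}
```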


@@ -84,7 +84,7 @@ var (
"foo", " foo bar", "bar", "buzz\nbar", "bar foo", "bfoo", "\n", "\nfoo", "foo\n", "hello foo world", "hello foo\n world", "",
"FOO", "Foo", "OO", "Oo", "\nfoo\n", strings.Repeat("f", 20), "prometheus", "prometheus_api_v1", "prometheus_api_v1_foo",
"10.0.1.20", "10.0.2.10", "10.0.3.30", "10.0.4.40",
-"foofoo0", "foofoo",
+"foofoo0", "foofoo", "😀foo0",
// Values matching / not matching the test regexps on long alternations.
"zQPbMkNO", "zQPbMkNo", "jyyfj00j0061", "jyyfj00j006", "jyyfj00j00612", "NNSPdvMi", "NNSPdvMiXXX", "NNSPdvMixxx", "nnSPdvMi", "nnSPdvMiXXX",


@@ -2027,25 +2027,21 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec
vec := make(Vector, 0, len(vs.Series))
for i, s := range vs.Series {
it := seriesIterators[i]
-t, f, h, ok := ev.vectorSelectorSingle(it, vs, enh.Ts)
-if ok {
-vec = append(vec, Sample{
-Metric: s.Labels(),
-T: t,
-F: f,
-H: h,
-})
-histSize := 0
-if h != nil {
-histSize := h.Size() / 16 // 16 bytes per sample.
-ev.currentSamples += histSize
-}
-ev.currentSamples++
-ev.samplesStats.IncrementSamplesAtTimestamp(enh.Ts, int64(1+histSize))
-if ev.currentSamples > ev.maxSamples {
-ev.error(ErrTooManySamples(env))
-}
+t, _, _, ok := ev.vectorSelectorSingle(it, vs, enh.Ts)
+if !ok {
+continue
+}
+// Note that we ignore the sample values because call only cares about the timestamp.
+vec = append(vec, Sample{
+Metric: s.Labels(),
+T: t,
+})
+ev.currentSamples++
+ev.samplesStats.IncrementSamplesAtTimestamp(enh.Ts, 1)
+if ev.currentSamples > ev.maxSamples {
+ev.error(ErrTooManySamples(env))
+}
}
ev.samplesStats.UpdatePeak(ev.currentSamples)


@@ -818,8 +818,8 @@ load 10s
{
Query: "timestamp(metricWith1HistogramEvery10Seconds)",
Start: time.Unix(21, 0),
-PeakSamples: 15, // histogram size 13 + 1 extra because Sample overhead + 1 float result
+PeakSamples: 2,
TotalSamples: 1, // 1 float sample (because of timestamp) / 10 seconds
TotalSamplesPerStep: stats.TotalSamplesPerStep{
21000: 1,
},
@@ -1116,8 +1116,8 @@ load 10s
Start: time.Unix(201, 0),
End: time.Unix(220, 0),
Interval: 5 * time.Second,
-PeakSamples: 18, // 13 histogram size + 1 extra because of Sample overhead + 4 float results
+PeakSamples: 5,
TotalSamples: 4, // 1 sample per query * 4 steps
TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 1,
206000: 1,


@@ -204,8 +204,8 @@ func (node *VectorSelector) String() string {
labelStrings = make([]string, 0, len(node.LabelMatchers)-1)
}
for _, matcher := range node.LabelMatchers {
-// Only include the __name__ label if its equality matching and matches the name.
-if matcher.Name == labels.MetricName && matcher.Type == labels.MatchEqual && matcher.Value == node.Name {
+// Only include the __name__ label if its equality matching and matches the name, but don't skip if it's an explicit empty name matcher.
+if matcher.Name == labels.MetricName && matcher.Type == labels.MatchEqual && matcher.Value == node.Name && matcher.Value != "" {
continue
}
labelStrings = append(labelStrings, matcher.String())


@@ -135,6 +135,9 @@ func TestExprString(t *testing.T) {
{
in: `a[1m] @ end()`,
},
+{
+in: `{__name__="",a="x"}`,
+},
}
for _, test := range inputs {
@@ -216,6 +219,16 @@ func TestVectorSelector_String(t *testing.T) {
},
expected: `{__name__="foobar"}`,
},
+{
+name: "empty name matcher",
+vs: VectorSelector{
+LabelMatchers: []*labels.Matcher{
+labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, ""),
+labels.MustNewMatcher(labels.MatchEqual, "a", "x"),
+},
+},
+expected: `{__name__="",a="x"}`,
+},
} {
t.Run(tc.name, func(t *testing.T) {
require.Equal(t, tc.expected, tc.vs.String())


@@ -96,12 +96,14 @@ func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io
err = file.Truncate(int64(filesize))
if err != nil {
+file.Close()
level.Error(logger).Log("msg", "Error setting filesize.", "filesize", filesize, "err", err)
return nil, nil, err
}
fileAsBytes, err := mmap.Map(file, mmap.RDWR, 0)
if err != nil {
+file.Close()
level.Error(logger).Log("msg", "Failed to mmap", "file", filename, "Attempted size", filesize, "err", err)
return nil, nil, err
}


@@ -110,10 +110,7 @@ func TestMMapFile(t *testing.T) {
filename := file.Name()
defer os.Remove(filename)
-fileAsBytes, closer, err := getMMapedFile(filename, 2, nil)
-if err != nil {
-t.Cleanup(func() { closer.Close() })
-}
+fileAsBytes, _, err := getMMapedFile(filename, 2, nil)
require.NoError(t, err)
copy(fileAsBytes, "ab")


@ -246,13 +246,16 @@ func (r *AlertingRule) sample(alert *Alert, ts time.Time) promql.Sample {
 	return s
 }
-// forStateSample returns the sample for ALERTS_FOR_STATE.
+// forStateSample returns a promql.Sample with the rule labels, `ALERTS_FOR_STATE` as the metric name and the rule name as the `alertname` label.
+// Optionally, if an alert is provided it'll copy the labels of the alert into the sample labels.
 func (r *AlertingRule) forStateSample(alert *Alert, ts time.Time, v float64) promql.Sample {
 	lb := labels.NewBuilder(r.labels)
-	alert.Labels.Range(func(l labels.Label) {
-		lb.Set(l.Name, l.Value)
-	})
+	if alert != nil {
+		alert.Labels.Range(func(l labels.Label) {
+			lb.Set(l.Name, l.Value)
+		})
+	}
 	lb.Set(labels.MetricName, alertForStateMetricName)
 	lb.Set(labels.AlertName, r.name)
@ -265,9 +268,11 @@ func (r *AlertingRule) forStateSample(alert *Alert, ts time.Time, v float64) pro
 	return s
 }
-// QueryforStateSeries returns the series for ALERTS_FOR_STATE.
-func (r *AlertingRule) QueryforStateSeries(ctx context.Context, alert *Alert, q storage.Querier) (storage.Series, error) {
-	smpl := r.forStateSample(alert, time.Now(), 0)
+// QueryForStateSeries returns the series for ALERTS_FOR_STATE of the alert rule.
+func (r *AlertingRule) QueryForStateSeries(ctx context.Context, q storage.Querier) (storage.SeriesSet, error) {
+	// We use a sample to ease the building of matchers.
+	// Don't provide an alert as we want matchers that match all series for the alert rule.
+	smpl := r.forStateSample(nil, time.Now(), 0)
 	var matchers []*labels.Matcher
 	smpl.Metric.Range(func(l labels.Label) {
 		mt, err := labels.NewMatcher(labels.MatchEqual, l.Name, l.Value)
@ -276,20 +281,9 @@ func (r *AlertingRule) QueryforStateSeries(ctx context.Context, alert *Alert, q
 		}
 		matchers = append(matchers, mt)
 	})
 	sset := q.Select(ctx, false, nil, matchers...)
-	var s storage.Series
-	for sset.Next() {
-		// Query assures that smpl.Metric is included in sset.At().Labels(),
-		// hence just checking the length would act like equality.
-		// (This is faster than calling labels.Compare again as we already have some info).
-		if sset.At().Labels().Len() == len(matchers) {
-			s = sset.At()
-			break
-		}
-	}
-	return s, sset.Err()
+	return sset, sset.Err()
 }
// SetEvaluationDuration updates evaluationDuration to the duration it took to evaluate the rule on its last evaluation. // SetEvaluationDuration updates evaluationDuration to the duration it took to evaluate the rule on its last evaluation.
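Since QueryForStateSeries now returns the whole storage.SeriesSet, callers do their own filtering. A hedged sketch of the consuming pattern (mirroring the updated test and RestoreForState further down in this diff; restoreSeriesFor is a hypothetical helper that would have to live in the rules package, because forStateSample is unexported):

    // Sketch only, under the assumptions stated above.
    func restoreSeriesFor(ctx context.Context, rule *AlertingRule, q storage.Querier) (storage.Series, error) {
        // Build matchers from the rule labels alone, so they select every
        // ALERTS_FOR_STATE series this rule could have written.
        sample := rule.forStateSample(nil, time.Now(), 0)

        sset, err := rule.QueryForStateSeries(ctx, q)
        if err != nil {
            return nil, err
        }
        for sset.Next() {
            // The matchers already cover the rule labels, so comparing
            // label-set lengths is enough to detect an exact match.
            if sset.At().Labels().Len() == sample.Metric.Len() {
                return sset.At(), sset.Err()
            }
        }
        return nil, sset.Err()
    }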
@ -457,8 +451,17 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc,
} }
} }
-			// If the alert was previously firing, keep it around for a given
-			// retention time so it is reported as resolved to the AlertManager.
+			// If the alert is resolved (was firing but is now inactive) keep it for
+			// at least the retention period. This is important for a number of reasons:
//
// 1. It allows for Prometheus to be more resilient to network issues that
// would otherwise prevent a resolved alert from being reported as resolved
// to Alertmanager.
//
// 2. It helps reduce the chance of resolved notifications being lost if
// Alertmanager crashes or restarts between receiving the resolved alert
// from Prometheus and sending the resolved notification. This tends to
// occur for routes with large Group intervals.
if a.State == StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > resolvedRetention) { if a.State == StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > resolvedRetention) {
delete(r.active, fp) delete(r.active, fp)
} }
@ -548,6 +551,13 @@ func (r *AlertingRule) ForEachActiveAlert(f func(*Alert)) {
} }
} }
func (r *AlertingRule) ActiveAlertsCount() int {
r.activeMtx.Lock()
defer r.activeMtx.Unlock()
return len(r.active)
}
func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay, interval time.Duration, notifyFunc NotifyFunc) { func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay, interval time.Duration, notifyFunc NotifyFunc) {
alerts := []*Alert{} alerts := []*Alert{}
r.ForEachActiveAlert(func(alert *Alert) { r.ForEachActiveAlert(func(alert *Alert) {


@ -710,19 +710,17 @@ func TestQueryForStateSeries(t *testing.T) {
 		labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
 	)
-	alert := &Alert{
-		State:       0,
-		Labels:      labels.EmptyLabels(),
-		Annotations: labels.EmptyLabels(),
-		Value:       0,
-		ActiveAt:    time.Time{},
-		FiredAt:     time.Time{},
-		ResolvedAt:  time.Time{},
-		LastSentAt:  time.Time{},
-		ValidUntil:  time.Time{},
-	}
-	series, err := rule.QueryforStateSeries(context.Background(), alert, querier)
+	sample := rule.forStateSample(nil, time.Time{}, 0)
+	seriesSet, err := rule.QueryForStateSeries(context.Background(), querier)
+	var series storage.Series
+	for seriesSet.Next() {
+		if seriesSet.At().Labels().Len() == sample.Metric.Len() {
+			series = seriesSet.At()
+			break
+		}
+	}
 	require.Equal(t, tst.expectedSeries, series)
 	require.Equal(t, tst.expectedError, err)
@ -1025,3 +1023,24 @@ func TestAlertingRule_SetNoDependencyRules(t *testing.T) {
rule.SetNoDependencyRules(true) rule.SetNoDependencyRules(true)
require.True(t, rule.NoDependencyRules()) require.True(t, rule.NoDependencyRules())
} }
func TestAlertingRule_ActiveAlertsCount(t *testing.T) {
rule := NewAlertingRule(
"TestRule",
nil,
time.Minute,
0,
labels.FromStrings("severity", "critical"),
labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil,
)
require.Equal(t, 0, rule.ActiveAlertsCount())
// Set an active alert.
lbls := labels.FromStrings("a1", "1")
h := lbls.Hash()
al := &Alert{State: StateFiring, Labels: lbls, ActiveAt: time.Now()}
rule.active[h] = al
require.Equal(t, 1, rule.ActiveAlertsCount())
}


@ -230,7 +230,11 @@ func (g *Group) run(ctx context.Context) {
g.evalIterationFunc(ctx, g, evalTimestamp) g.evalIterationFunc(ctx, g, evalTimestamp)
} }
-		g.RestoreForState(time.Now())
+		restoreStartTime := time.Now()
g.RestoreForState(restoreStartTime)
totalRestoreTimeSeconds := time.Since(restoreStartTime).Seconds()
g.metrics.GroupLastRestoreDuration.WithLabelValues(GroupKey(g.file, g.name)).Set(totalRestoreTimeSeconds)
level.Debug(g.logger).Log("msg", "'for' state restoration completed", "duration_seconds", totalRestoreTimeSeconds)
g.shouldRestore = false g.shouldRestore = false
} }
@ -660,25 +664,40 @@ func (g *Group) RestoreForState(ts time.Time) {
continue continue
} }
sset, err := alertRule.QueryForStateSeries(g.opts.Context, q)
if err != nil {
level.Error(g.logger).Log(
"msg", "Failed to restore 'for' state",
labels.AlertName, alertRule.Name(),
"stage", "Select",
"err", err,
)
// Even if we failed to query the `ALERT_FOR_STATE` series, we currently have no way to retry the restore process.
// So the best we can do is mark the rule as restored and let it eventually fire.
alertRule.SetRestored(true)
continue
}
// While not technically the same number of series we expect, it's as good of an approximation as any.
seriesByLabels := make(map[string]storage.Series, alertRule.ActiveAlertsCount())
for sset.Next() {
seriesByLabels[sset.At().Labels().DropMetricName().String()] = sset.At()
}
// No results for this alert rule.
if len(seriesByLabels) == 0 {
level.Debug(g.logger).Log("msg", "No series found to restore the 'for' state of the alert rule", labels.AlertName, alertRule.Name())
alertRule.SetRestored(true)
continue
}
 		alertRule.ForEachActiveAlert(func(a *Alert) {
 			var s storage.Series
-			s, err := alertRule.QueryforStateSeries(g.opts.Context, a, q)
-			if err != nil {
-				// Querier Warnings are ignored. We do not care unless we have an error.
-				level.Error(g.logger).Log(
-					"msg", "Failed to restore 'for' state",
-					labels.AlertName, alertRule.Name(),
-					"stage", "Select",
-					"err", err,
-				)
-				return
-			}
-			if s == nil {
-				return
-			}
+			s, ok := seriesByLabels[a.Labels.String()]
+			if !ok {
+				return
+			}
 			// Series found for the 'for' state.
 			var t int64
 			var v float64
@ -779,17 +798,18 @@ const namespace = "prometheus"
// Metrics for rule evaluation. // Metrics for rule evaluation.
 type Metrics struct {
 	EvalDuration             prometheus.Summary
 	IterationDuration        prometheus.Summary
 	IterationsMissed         *prometheus.CounterVec
 	IterationsScheduled      *prometheus.CounterVec
 	EvalTotal                *prometheus.CounterVec
 	EvalFailures             *prometheus.CounterVec
 	GroupInterval            *prometheus.GaugeVec
 	GroupLastEvalTime        *prometheus.GaugeVec
 	GroupLastDuration        *prometheus.GaugeVec
+	GroupLastRestoreDuration *prometheus.GaugeVec
 	GroupRules               *prometheus.GaugeVec
 	GroupSamples             *prometheus.GaugeVec
 }
// NewGroupMetrics creates a new instance of Metrics and registers it with the provided registerer, // NewGroupMetrics creates a new instance of Metrics and registers it with the provided registerer,
@ -865,6 +885,14 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
}, },
[]string{"rule_group"}, []string{"rule_group"},
), ),
GroupLastRestoreDuration: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "rule_group_last_restore_duration_seconds",
Help: "The duration of the last alert rules alerts restoration using the `ALERTS_FOR_STATE` series.",
},
[]string{"rule_group"},
),
GroupRules: prometheus.NewGaugeVec( GroupRules: prometheus.NewGaugeVec(
prometheus.GaugeOpts{ prometheus.GaugeOpts{
Namespace: namespace, Namespace: namespace,
@ -894,6 +922,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
m.GroupInterval, m.GroupInterval,
m.GroupLastEvalTime, m.GroupLastEvalTime,
m.GroupLastDuration, m.GroupLastDuration,
m.GroupLastRestoreDuration,
m.GroupRules, m.GroupRules,
m.GroupSamples, m.GroupSamples,
) )
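A hedged sketch of exercising the new gauge from a test in the rules package, using client_golang's testutil; the group key string and the 0.42 value are illustrative only, not taken from this diff:

    func TestGroupLastRestoreDurationGauge(t *testing.T) {
        reg := prometheus.NewRegistry()
        m := NewGroupMetrics(reg)

        // RestoreForState timing normally sets this; set it directly here.
        key := "/etc/prometheus/rules.yml;default" // assumed GroupKey(file, name) format
        m.GroupLastRestoreDuration.WithLabelValues(key).Set(0.42)

        require.Equal(t, 0.42, testutil.ToFloat64(m.GroupLastRestoreDuration.WithLabelValues(key)))
    }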


@ -397,122 +397,123 @@ func TestForStateRestore(t *testing.T) {
group.Eval(context.TODO(), evalTime) group.Eval(context.TODO(), evalTime)
} }
exp := rule.ActiveAlerts()
for _, aa := range exp {
require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
sort.Slice(exp, func(i, j int) bool {
return labels.Compare(exp[i].Labels, exp[j].Labels) < 0
})
// Prometheus goes down here. We create new rules and groups. // Prometheus goes down here. We create new rules and groups.
type testInput struct { type testInput struct {
name string
restoreDuration time.Duration restoreDuration time.Duration
alerts []*Alert expectedAlerts []*Alert
num int num int
noRestore bool noRestore bool
gracePeriod bool gracePeriod bool
downDuration time.Duration downDuration time.Duration
before func()
} }
tests := []testInput{ tests := []testInput{
{ {
// Normal restore (alerts were not firing). name: "normal restore (alerts were not firing)",
restoreDuration: 15 * time.Minute, restoreDuration: 15 * time.Minute,
alerts: rule.ActiveAlerts(), expectedAlerts: rule.ActiveAlerts(),
downDuration: 10 * time.Minute, downDuration: 10 * time.Minute,
}, },
{ {
// Testing Outage Tolerance. name: "outage tolerance",
restoreDuration: 40 * time.Minute, restoreDuration: 40 * time.Minute,
noRestore: true, noRestore: true,
num: 2, num: 2,
}, },
{ {
// No active alerts. name: "no active alerts",
restoreDuration: 50 * time.Minute, restoreDuration: 50 * time.Minute,
alerts: []*Alert{}, expectedAlerts: []*Alert{},
},
{
name: "test the grace period",
restoreDuration: 25 * time.Minute,
expectedAlerts: []*Alert{},
gracePeriod: true,
before: func() {
for _, duration := range []time.Duration{10 * time.Minute, 15 * time.Minute, 20 * time.Minute} {
evalTime := baseTime.Add(duration)
group.Eval(context.TODO(), evalTime)
}
},
num: 2,
}, },
} }
testFunc := func(tst testInput) { for _, tt := range tests {
newRule := NewAlertingRule( t.Run(tt.name, func(t *testing.T) {
"HTTPRequestRateLow", if tt.before != nil {
expr, tt.before()
alertForDuration, }
0,
labels.FromStrings("severity", "critical"), newRule := NewAlertingRule(
labels.EmptyLabels(), labels.EmptyLabels(), "", false, nil, "HTTPRequestRateLow",
) expr,
newGroup := NewGroup(GroupOptions{ alertForDuration,
Name: "default", 0,
Interval: time.Second, labels.FromStrings("severity", "critical"),
Rules: []Rule{newRule}, labels.EmptyLabels(), labels.EmptyLabels(), "", false, nil,
ShouldRestore: true, )
Opts: opts, newGroup := NewGroup(GroupOptions{
Name: "default",
Interval: time.Second,
Rules: []Rule{newRule},
ShouldRestore: true,
Opts: opts,
})
newGroups := make(map[string]*Group)
newGroups["default;"] = newGroup
restoreTime := baseTime.Add(tt.restoreDuration)
// First eval before restoration.
newGroup.Eval(context.TODO(), restoreTime)
// Restore happens here.
newGroup.RestoreForState(restoreTime)
got := newRule.ActiveAlerts()
for _, aa := range got {
require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
sort.Slice(got, func(i, j int) bool {
return labels.Compare(got[i].Labels, got[j].Labels) < 0
})
// In all cases, we expect the restoration process to have completed.
require.Truef(t, newRule.Restored(), "expected the rule restoration process to have completed")
// Checking if we have restored it correctly.
switch {
case tt.noRestore:
require.Len(t, got, tt.num)
for _, e := range got {
require.Equal(t, e.ActiveAt, restoreTime)
}
case tt.gracePeriod:
require.Len(t, got, tt.num)
for _, e := range got {
require.Equal(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime))
}
default:
exp := tt.expectedAlerts
require.Equal(t, len(exp), len(got))
sortAlerts(exp)
sortAlerts(got)
for i, e := range exp {
require.Equal(t, e.Labels, got[i].Labels)
// Difference in time should be within 1e6 ns, i.e. 1ms
// (due to conversion between ns & ms, float64 & int64).
activeAtDiff := float64(e.ActiveAt.Unix() + int64(tt.downDuration/time.Second) - got[i].ActiveAt.Unix())
require.Equal(t, 0.0, math.Abs(activeAtDiff), "'for' state restored time is wrong")
}
}
}) })
newGroups := make(map[string]*Group)
newGroups["default;"] = newGroup
restoreTime := baseTime.Add(tst.restoreDuration)
// First eval before restoration.
newGroup.Eval(context.TODO(), restoreTime)
// Restore happens here.
newGroup.RestoreForState(restoreTime)
got := newRule.ActiveAlerts()
for _, aa := range got {
require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
sort.Slice(got, func(i, j int) bool {
return labels.Compare(got[i].Labels, got[j].Labels) < 0
})
// Checking if we have restored it correctly.
switch {
case tst.noRestore:
require.Len(t, got, tst.num)
for _, e := range got {
require.Equal(t, e.ActiveAt, restoreTime)
}
case tst.gracePeriod:
require.Len(t, got, tst.num)
for _, e := range got {
require.Equal(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime))
}
default:
exp := tst.alerts
require.Equal(t, len(exp), len(got))
sortAlerts(exp)
sortAlerts(got)
for i, e := range exp {
require.Equal(t, e.Labels, got[i].Labels)
// Difference in time should be within 1e6 ns, i.e. 1ms
// (due to conversion between ns & ms, float64 & int64).
activeAtDiff := float64(e.ActiveAt.Unix() + int64(tst.downDuration/time.Second) - got[i].ActiveAt.Unix())
require.Equal(t, 0.0, math.Abs(activeAtDiff), "'for' state restored time is wrong")
}
}
} }
for _, tst := range tests {
testFunc(tst)
}
// Testing the grace period.
for _, duration := range []time.Duration{10 * time.Minute, 15 * time.Minute, 20 * time.Minute} {
evalTime := baseTime.Add(duration)
group.Eval(context.TODO(), evalTime)
}
testFunc(testInput{
restoreDuration: 25 * time.Minute,
alerts: []*Alert{},
gracePeriod: true,
num: 2,
})
} }
func TestStaleness(t *testing.T) { func TestStaleness(t *testing.T) {


@ -81,6 +81,8 @@ type Options struct {
// Option to enable the ingestion of the created timestamp as a synthetic zero sample. // Option to enable the ingestion of the created timestamp as a synthetic zero sample.
// See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md // See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md
EnableCreatedTimestampZeroIngestion bool EnableCreatedTimestampZeroIngestion bool
// Option to enable the ingestion of native histograms.
EnableNativeHistogramsIngestion bool
// Optional HTTP client options to use when scraping. // Optional HTTP client options to use when scraping.
HTTPClientOptions []config_util.HTTPClientOption HTTPClientOptions []config_util.HTTPClientOption
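The option is plumbed from here down into the scrape loop (see the scrape.go and manager test changes below). A hedged fragment showing how a caller might enable it when constructing the scrape manager; `s` (a storage.Appendable) and `reg` (a prometheus.Registerer) are assumed and not part of this diff:

    opts := &scrape.Options{
        EnableNativeHistogramsIngestion: true,
        // Everything else left at its defaults for this sketch.
    }
    mng, err := scrape.NewManager(opts, log.NewNopLogger(), s, reg)
    if err != nil {
        return err
    }
    defer mng.Stop()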


@ -178,6 +178,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
opts.interval, opts.interval,
opts.timeout, opts.timeout,
opts.scrapeClassicHistograms, opts.scrapeClassicHistograms,
options.EnableNativeHistogramsIngestion,
options.EnableCreatedTimestampZeroIngestion, options.EnableCreatedTimestampZeroIngestion,
options.ExtraMetrics, options.ExtraMetrics,
options.EnableMetadataStorage, options.EnableMetadataStorage,
@ -827,7 +828,10 @@ type scrapeLoop struct {
interval time.Duration interval time.Duration
timeout time.Duration timeout time.Duration
scrapeClassicHistograms bool scrapeClassicHistograms bool
enableCTZeroIngestion bool
// Feature flagged options.
enableNativeHistogramIngestion bool
enableCTZeroIngestion bool
appender func(ctx context.Context) storage.Appender appender func(ctx context.Context) storage.Appender
symbolTable *labels.SymbolTable symbolTable *labels.SymbolTable
@ -1123,6 +1127,7 @@ func newScrapeLoop(ctx context.Context,
interval time.Duration, interval time.Duration,
timeout time.Duration, timeout time.Duration,
scrapeClassicHistograms bool, scrapeClassicHistograms bool,
enableNativeHistogramIngestion bool,
enableCTZeroIngestion bool, enableCTZeroIngestion bool,
reportExtraMetrics bool, reportExtraMetrics bool,
appendMetadataToWAL bool, appendMetadataToWAL bool,
@ -1153,33 +1158,34 @@ func newScrapeLoop(ctx context.Context,
} }
sl := &scrapeLoop{ sl := &scrapeLoop{
scraper: sc, scraper: sc,
buffers: buffers, buffers: buffers,
cache: cache, cache: cache,
appender: appender, appender: appender,
symbolTable: symbolTable, symbolTable: symbolTable,
sampleMutator: sampleMutator, sampleMutator: sampleMutator,
reportSampleMutator: reportSampleMutator, reportSampleMutator: reportSampleMutator,
stopped: make(chan struct{}), stopped: make(chan struct{}),
offsetSeed: offsetSeed, offsetSeed: offsetSeed,
l: l, l: l,
parentCtx: ctx, parentCtx: ctx,
appenderCtx: appenderCtx, appenderCtx: appenderCtx,
honorTimestamps: honorTimestamps, honorTimestamps: honorTimestamps,
trackTimestampsStaleness: trackTimestampsStaleness, trackTimestampsStaleness: trackTimestampsStaleness,
enableCompression: enableCompression, enableCompression: enableCompression,
sampleLimit: sampleLimit, sampleLimit: sampleLimit,
bucketLimit: bucketLimit, bucketLimit: bucketLimit,
maxSchema: maxSchema, maxSchema: maxSchema,
labelLimits: labelLimits, labelLimits: labelLimits,
interval: interval, interval: interval,
timeout: timeout, timeout: timeout,
scrapeClassicHistograms: scrapeClassicHistograms, scrapeClassicHistograms: scrapeClassicHistograms,
enableCTZeroIngestion: enableCTZeroIngestion, enableNativeHistogramIngestion: enableNativeHistogramIngestion,
reportExtraMetrics: reportExtraMetrics, enableCTZeroIngestion: enableCTZeroIngestion,
appendMetadataToWAL: appendMetadataToWAL, reportExtraMetrics: reportExtraMetrics,
metrics: metrics, appendMetadataToWAL: appendMetadataToWAL,
skipOffsetting: skipOffsetting, metrics: metrics,
skipOffsetting: skipOffsetting,
} }
sl.ctx, sl.cancel = context.WithCancel(ctx) sl.ctx, sl.cancel = context.WithCancel(ctx)
@ -1627,7 +1633,7 @@ loop:
} }
} }
-		if isHistogram {
+		if isHistogram && sl.enableNativeHistogramIngestion {
if h != nil { if h != nil {
ref, err = app.AppendHistogram(ref, lset, t, h, nil) ref, err = app.AppendHistogram(ref, lset, t, h, nil)
} else { } else {


@ -678,6 +678,7 @@ func newBasicScrapeLoop(t testing.TB, ctx context.Context, scraper scraper, app
false, false,
false, false,
false, false,
false,
nil, nil,
false, false,
newTestScrapeMetrics(t), newTestScrapeMetrics(t),
@ -819,6 +820,7 @@ func TestScrapeLoopRun(t *testing.T) {
false, false,
false, false,
false, false,
false,
nil, nil,
false, false,
scrapeMetrics, scrapeMetrics,
@ -962,6 +964,7 @@ func TestScrapeLoopMetadata(t *testing.T) {
false, false,
false, false,
false, false,
false,
nil, nil,
false, false,
scrapeMetrics, scrapeMetrics,
@ -1571,6 +1574,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
app := &bucketLimitAppender{Appender: resApp, limit: 2} app := &bucketLimitAppender{Appender: resApp, limit: 2}
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0)
sl.enableNativeHistogramIngestion = true
sl.sampleMutator = func(l labels.Labels) labels.Labels { sl.sampleMutator = func(l labels.Labels) labels.Labels {
if l.Has("deleteme") { if l.Has("deleteme") {
return labels.EmptyLabels() return labels.EmptyLabels()
@ -1797,14 +1801,15 @@ func TestScrapeLoopAppendStalenessIfTrackTimestampStaleness(t *testing.T) {
func TestScrapeLoopAppendExemplar(t *testing.T) { func TestScrapeLoopAppendExemplar(t *testing.T) {
tests := []struct { tests := []struct {
title string title string
scrapeClassicHistograms bool scrapeClassicHistograms bool
scrapeText string enableNativeHistogramsIngestion bool
contentType string scrapeText string
discoveryLabels []string contentType string
floats []floatSample discoveryLabels []string
histograms []histogramSample floats []floatSample
exemplars []exemplar.Exemplar histograms []histogramSample
exemplars []exemplar.Exemplar
}{ }{
{ {
title: "Metric without exemplars", title: "Metric without exemplars",
@ -1862,6 +1867,8 @@ metric_total{n="2"} 2 # {t="2"} 2.0 20000
}, },
{ {
title: "Native histogram with three exemplars", title: "Native histogram with three exemplars",
enableNativeHistogramsIngestion: true,
scrapeText: `name: "test_histogram" scrapeText: `name: "test_histogram"
help: "Test histogram with many buckets removed to keep it manageable in size." help: "Test histogram with many buckets removed to keep it manageable in size."
type: HISTOGRAM type: HISTOGRAM
@ -1976,6 +1983,8 @@ metric: <
}, },
{ {
title: "Native histogram with three exemplars scraped as classic histogram", title: "Native histogram with three exemplars scraped as classic histogram",
enableNativeHistogramsIngestion: true,
scrapeText: `name: "test_histogram" scrapeText: `name: "test_histogram"
help: "Test histogram with many buckets removed to keep it manageable in size." help: "Test histogram with many buckets removed to keep it manageable in size."
type: HISTOGRAM type: HISTOGRAM
@ -2115,6 +2124,7 @@ metric: <
} }
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0)
sl.enableNativeHistogramIngestion = test.enableNativeHistogramsIngestion
sl.sampleMutator = func(l labels.Labels) labels.Labels { sl.sampleMutator = func(l labels.Labels) labels.Labels {
return mutateSampleLabels(l, discoveryLabels, false, nil) return mutateSampleLabels(l, discoveryLabels, false, nil)
} }
@ -3710,7 +3720,7 @@ scrape_configs:
s.DB.EnableNativeHistograms() s.DB.EnableNativeHistograms()
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
-	mng, err := NewManager(nil, nil, s, reg)
+	mng, err := NewManager(&Options{EnableNativeHistogramsIngestion: true}, nil, s, reg)
require.NoError(t, err) require.NoError(t, err)
cfg, err := config.Load(configStr, false, log.NewNopLogger()) cfg, err := config.Load(configStr, false, log.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)


@ -33,6 +33,6 @@ jobs:
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter' if: github.repository == 'prometheus/snmp_exporter'
- name: Lint - name: Lint
-        uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0
+        uses: golangci/golangci-lint-action@9d1e0624a798bb64f6c3cea93db47765312263dc # v5.1.0
with: with:
version: v1.56.2 version: v1.56.2


@ -1,22 +0,0 @@
## Copying from opentelemetry/opentelemetry-collector-contrib
The files in `prometheus/` and `prometheusremotewrite/` are copied from the OpenTelemetry Project[^1].
This is done instead of adding a go.mod dependency because OpenTelemetry depends on `prometheus/prometheus` and a cyclic dependency will be created. This is just a temporary solution and the long-term solution is to move the required packages from OpenTelemetry into `prometheus/prometheus`.
To update the dependency is a multi-step process:
1. Vendor the latest `prometheus/prometheus`@`main` into [`opentelemetry/opentelemetry-collector-contrib`](https://github.com/open-telemetry/opentelemetry-collector-contrib)
1. Update the VERSION in `update-copy.sh`.
1. Run `./update-copy.sh`.
### Why copy?
This is because the packages we copy depend on the [`prompb`](https://github.com/prometheus/prometheus/blob/main/prompb) package. While the package is relatively stable, there are still changes. For example, https://github.com/prometheus/prometheus/pull/11935 changed the types.
This means that if we depend on the upstream packages directly, we will never be able to make changes like the above. Hence we're copying the code for now.
### I need to manually change these files
When we do want to make changes to the types in `prompb`, we might need to edit the files directly. That is OK, please let @gouthamve or @jesusvazquez know so they can take care of updating the upstream code (by vendoring in `prometheus/prometheus` upstream and resolving conflicts) and will then run the copy
script again to keep things updated.
[^1]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheus and https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheusremotewrite


@ -1,9 +1,20 @@
// DO NOT EDIT. COPIED AS-IS. SEE ../README.md // Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_label.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
// Copyright The OpenTelemetry Authors package prometheus
// SPDX-License-Identifier: Apache-2.0
package prometheus // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
import ( import (
"strings" "strings"


@ -1,9 +1,20 @@
// DO NOT EDIT. COPIED AS-IS. SEE ../README.md // Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_name.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
// Copyright The OpenTelemetry Authors package prometheus
// SPDX-License-Identifier: Apache-2.0
package prometheus // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
import ( import (
"strings" "strings"


@ -1,9 +1,20 @@
// DO NOT EDIT. COPIED AS-IS. SEE ../README.md // Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/unit_to_ucum.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
// Copyright The OpenTelemetry Authors package prometheus
// SPDX-License-Identifier: Apache-2.0
package prometheus // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
import "strings" import "strings"


@ -1,29 +1,42 @@
// DO NOT EDIT. COPIED AS-IS. SEE ../README.md // Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/helper.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
// Copyright The OpenTelemetry Authors package prometheusremotewrite
// SPDX-License-Identifier: Apache-2.0
package prometheusremotewrite // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
import ( import (
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"log" "log"
"math" "math"
"slices"
"sort" "sort"
"strconv" "strconv"
"strings"
"time" "time"
"unicode/utf8" "unicode/utf8"
"github.com/cespare/xxhash/v2"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/prompb"
"go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pmetric"
conventions "go.opentelemetry.io/collector/semconv/v1.6.1" conventions "go.opentelemetry.io/collector/semconv/v1.6.1"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/prompb"
prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
) )
@ -48,7 +61,7 @@ const (
) )
type bucketBoundsData struct { type bucketBoundsData struct {
-	sig   string
+	ts    *prompb.TimeSeries
bound float64 bound float64
} }
@ -66,94 +79,47 @@ func (a ByLabelName) Len() int { return len(a) }
func (a ByLabelName) Less(i, j int) bool { return a[i].Name < a[j].Name } func (a ByLabelName) Less(i, j int) bool { return a[i].Name < a[j].Name }
func (a ByLabelName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a ByLabelName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// addSample finds a TimeSeries in tsMap that corresponds to the label set labels, and add sample to the TimeSeries; it // timeSeriesSignature returns a hashed label set signature.
// creates a new TimeSeries in the map if not found and returns the time series signature. // The label slice should not contain duplicate label names; this method sorts the slice by label name before creating
// tsMap will be unmodified if either labels or sample is nil, but can still be modified if the exemplar is nil.
func addSample(tsMap map[string]*prompb.TimeSeries, sample *prompb.Sample, labels []prompb.Label,
datatype string) string {
if sample == nil || labels == nil || tsMap == nil {
// This shouldn't happen
return ""
}
sig := timeSeriesSignature(datatype, labels)
ts := tsMap[sig]
if ts != nil {
ts.Samples = append(ts.Samples, *sample)
} else {
newTs := &prompb.TimeSeries{
Labels: labels,
Samples: []prompb.Sample{*sample},
}
tsMap[sig] = newTs
}
return sig
}
// addExemplars finds a bucket bound that corresponds to the exemplars value and add the exemplar to the specific sig;
// we only add exemplars if samples are presents
// tsMap is unmodified if either of its parameters is nil and samples are nil.
func addExemplars(tsMap map[string]*prompb.TimeSeries, exemplars []prompb.Exemplar, bucketBoundsData []bucketBoundsData) {
if len(tsMap) == 0 || len(bucketBoundsData) == 0 || len(exemplars) == 0 {
return
}
sort.Sort(byBucketBoundsData(bucketBoundsData))
for _, exemplar := range exemplars {
addExemplar(tsMap, bucketBoundsData, exemplar)
}
}
func addExemplar(tsMap map[string]*prompb.TimeSeries, bucketBounds []bucketBoundsData, exemplar prompb.Exemplar) {
for _, bucketBound := range bucketBounds {
sig := bucketBound.sig
bound := bucketBound.bound
ts := tsMap[sig]
if ts != nil && len(ts.Samples) > 0 && exemplar.Value <= bound {
ts.Exemplars = append(ts.Exemplars, exemplar)
return
}
}
}
// timeSeries return a string signature in the form of:
//
// TYPE-label1-value1- ... -labelN-valueN
//
// the label slice should not contain duplicate label names; this method sorts the slice by label name before creating
// the signature. // the signature.
func timeSeriesSignature(datatype string, labels []prompb.Label) string { // The algorithm is the same as in Prometheus' labels.StableHash function.
length := len(datatype) func timeSeriesSignature(labels []prompb.Label) uint64 {
for _, lb := range labels {
length += 2 + len(lb.GetName()) + len(lb.GetValue())
}
b := strings.Builder{}
b.Grow(length)
b.WriteString(datatype)
sort.Sort(ByLabelName(labels)) sort.Sort(ByLabelName(labels))
for _, lb := range labels { // Use xxhash.Sum64(b) for fast path as it's faster.
b.WriteString("-") b := make([]byte, 0, 1024)
b.WriteString(lb.GetName()) for i, v := range labels {
b.WriteString("-") if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) {
b.WriteString(lb.GetValue()) // If labels entry is 1KB+ do not allocate whole entry.
} h := xxhash.New()
_, _ = h.Write(b)
for _, v := range labels[i:] {
_, _ = h.WriteString(v.Name)
_, _ = h.Write(seps)
_, _ = h.WriteString(v.Value)
_, _ = h.Write(seps)
}
return h.Sum64()
}
return b.String() b = append(b, v.Name...)
b = append(b, seps[0])
b = append(b, v.Value...)
b = append(b, seps[0])
}
return xxhash.Sum64(b)
} }
var seps = []byte{'\xff'}
// createAttributes creates a slice of Prometheus Labels with OTLP attributes and pairs of string values. // createAttributes creates a slice of Prometheus Labels with OTLP attributes and pairs of string values.
// Unpaired string values are ignored. String pairs overwrite OTLP labels if collisions happen, and overwrites are // Unpaired string values are ignored. String pairs overwrite OTLP labels if collisions happen and
// logged. Resulting label names are sanitized. // if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized.
func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externalLabels map[string]string, extras ...string) []prompb.Label { func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externalLabels map[string]string,
serviceName, haveServiceName := resource.Attributes().Get(conventions.AttributeServiceName) ignoreAttrs []string, logOnOverwrite bool, extras ...string) []prompb.Label {
instance, haveInstanceID := resource.Attributes().Get(conventions.AttributeServiceInstanceID) resourceAttrs := resource.Attributes()
serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName)
instance, haveInstanceID := resourceAttrs.Get(conventions.AttributeServiceInstanceID)
// Calculate the maximum possible number of labels we could return so we can preallocate l // Calculate the maximum possible number of labels we could return so we can preallocate l
maxLabelCount := attributes.Len() + len(externalLabels) + len(extras)/2 maxLabelCount := attributes.Len() + len(externalLabels) + len(extras)/2
@ -171,9 +137,13 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
// Ensure attributes are sorted by key for consistent merging of keys which // Ensure attributes are sorted by key for consistent merging of keys which
// collide when sanitized. // collide when sanitized.
labels := make([]prompb.Label, 0, attributes.Len()) labels := make([]prompb.Label, 0, maxLabelCount)
// XXX: Should we always drop service namespace/service name/service instance ID from the labels
// (as they get mapped to other Prometheus labels)?
attributes.Range(func(key string, value pcommon.Value) bool { attributes.Range(func(key string, value pcommon.Value) bool {
labels = append(labels, prompb.Label{Name: key, Value: value.AsString()}) if !slices.Contains(ignoreAttrs, key) {
labels = append(labels, prompb.Label{Name: key, Value: value.AsString()})
}
return true return true
}) })
sort.Stable(ByLabelName(labels)) sort.Stable(ByLabelName(labels))
@ -190,7 +160,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
// Map service.name + service.namespace to job // Map service.name + service.namespace to job
if haveServiceName { if haveServiceName {
val := serviceName.AsString() val := serviceName.AsString()
if serviceNamespace, ok := resource.Attributes().Get(conventions.AttributeServiceNamespace); ok { if serviceNamespace, ok := resourceAttrs.Get(conventions.AttributeServiceNamespace); ok {
val = fmt.Sprintf("%s/%s", serviceNamespace.AsString(), val) val = fmt.Sprintf("%s/%s", serviceNamespace.AsString(), val)
} }
l[model.JobLabel] = val l[model.JobLabel] = val
@ -213,7 +183,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
break break
} }
_, found := l[extras[i]] _, found := l[extras[i]]
if found { if found && logOnOverwrite {
log.Println("label " + extras[i] + " is overwritten. Check if Prometheus reserved labels are used.") log.Println("label " + extras[i] + " is overwritten. Check if Prometheus reserved labels are used.")
} }
// internal labels should be maintained // internal labels should be maintained
@ -224,12 +194,12 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
l[name] = extras[i+1] l[name] = extras[i+1]
} }
s := make([]prompb.Label, 0, len(l)) labels = labels[:0]
for k, v := range l { for k, v := range l {
s = append(s, prompb.Label{Name: k, Value: v}) labels = append(labels, prompb.Label{Name: k, Value: v})
} }
return s return labels
} }
// isValidAggregationTemporality checks whether an OTel metric has a valid // isValidAggregationTemporality checks whether an OTel metric has a valid
@ -249,100 +219,84 @@ func isValidAggregationTemporality(metric pmetric.Metric) bool {
return false return false
} }
// addSingleHistogramDataPoint converts pt to 2 + min(len(ExplicitBounds), len(BucketCount)) + 1 samples. It func (c *PrometheusConverter) addHistogramDataPoints(dataPoints pmetric.HistogramDataPointSlice,
// ignore extra buckets if len(ExplicitBounds) > len(BucketCounts) resource pcommon.Resource, settings Settings, baseName string) {
func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings, tsMap map[string]*prompb.TimeSeries, baseName string) { for x := 0; x < dataPoints.Len(); x++ {
timestamp := convertTimeStamp(pt.Timestamp()) pt := dataPoints.At(x)
baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels) timestamp := convertTimeStamp(pt.Timestamp())
baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false)
createLabels := func(nameSuffix string, extras ...string) []prompb.Label { // If the sum is unset, it indicates the _sum metric point should be
extraLabelCount := len(extras) / 2 // omitted
labels := make([]prompb.Label, len(baseLabels), len(baseLabels)+extraLabelCount+1) // +1 for name if pt.HasSum() {
copy(labels, baseLabels) // treat sum as a sample in an individual TimeSeries
sum := &prompb.Sample{
Value: pt.Sum(),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
sum.Value = math.Float64frombits(value.StaleNaN)
}
sumlabels := createLabels(baseName+sumStr, baseLabels)
c.addSample(sum, sumlabels)
for extrasIdx := 0; extrasIdx < extraLabelCount; extrasIdx++ {
labels = append(labels, prompb.Label{Name: extras[extrasIdx], Value: extras[extrasIdx+1]})
} }
// sum, count, and buckets of the histogram should append suffix to baseName // treat count as a sample in an individual TimeSeries
labels = append(labels, prompb.Label{Name: model.MetricNameLabel, Value: baseName + nameSuffix}) count := &prompb.Sample{
Value: float64(pt.Count()),
return labels
}
// If the sum is unset, it indicates the _sum metric point should be
// omitted
if pt.HasSum() {
// treat sum as a sample in an individual TimeSeries
sum := &prompb.Sample{
Value: pt.Sum(),
Timestamp: timestamp, Timestamp: timestamp,
} }
if pt.Flags().NoRecordedValue() { if pt.Flags().NoRecordedValue() {
sum.Value = math.Float64frombits(value.StaleNaN) count.Value = math.Float64frombits(value.StaleNaN)
} }
sumlabels := createLabels(sumStr) countlabels := createLabels(baseName+countStr, baseLabels)
addSample(tsMap, sum, sumlabels, metric.Type().String()) c.addSample(count, countlabels)
} // cumulative count for conversion to cumulative histogram
var cumulativeCount uint64
// treat count as a sample in an individual TimeSeries var bucketBounds []bucketBoundsData
count := &prompb.Sample{
Value: float64(pt.Count()),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
count.Value = math.Float64frombits(value.StaleNaN)
}
countlabels := createLabels(countStr) // process each bound, based on histograms proto definition, # of buckets = # of explicit bounds + 1
addSample(tsMap, count, countlabels, metric.Type().String()) for i := 0; i < pt.ExplicitBounds().Len() && i < pt.BucketCounts().Len(); i++ {
bound := pt.ExplicitBounds().At(i)
cumulativeCount += pt.BucketCounts().At(i)
bucket := &prompb.Sample{
Value: float64(cumulativeCount),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
bucket.Value = math.Float64frombits(value.StaleNaN)
}
boundStr := strconv.FormatFloat(bound, 'f', -1, 64)
labels := createLabels(baseName+bucketStr, baseLabels, leStr, boundStr)
ts := c.addSample(bucket, labels)
// cumulative count for conversion to cumulative histogram bucketBounds = append(bucketBounds, bucketBoundsData{ts: ts, bound: bound})
var cumulativeCount uint64 }
// add le=+Inf bucket
promExemplars := getPromExemplars[pmetric.HistogramDataPoint](pt) infBucket := &prompb.Sample{
var bucketBounds []bucketBoundsData
// process each bound, based on histograms proto definition, # of buckets = # of explicit bounds + 1
for i := 0; i < pt.ExplicitBounds().Len() && i < pt.BucketCounts().Len(); i++ {
bound := pt.ExplicitBounds().At(i)
cumulativeCount += pt.BucketCounts().At(i)
bucket := &prompb.Sample{
Value: float64(cumulativeCount),
Timestamp: timestamp, Timestamp: timestamp,
} }
if pt.Flags().NoRecordedValue() { if pt.Flags().NoRecordedValue() {
bucket.Value = math.Float64frombits(value.StaleNaN) infBucket.Value = math.Float64frombits(value.StaleNaN)
} else {
infBucket.Value = float64(pt.Count())
} }
boundStr := strconv.FormatFloat(bound, 'f', -1, 64) infLabels := createLabels(baseName+bucketStr, baseLabels, leStr, pInfStr)
labels := createLabels(bucketStr, leStr, boundStr) ts := c.addSample(infBucket, infLabels)
sig := addSample(tsMap, bucket, labels, metric.Type().String())
bucketBounds = append(bucketBounds, bucketBoundsData{sig: sig, bound: bound}) bucketBounds = append(bucketBounds, bucketBoundsData{ts: ts, bound: math.Inf(1)})
} c.addExemplars(pt, bucketBounds)
// add le=+Inf bucket
infBucket := &prompb.Sample{
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
infBucket.Value = math.Float64frombits(value.StaleNaN)
} else {
infBucket.Value = float64(pt.Count())
}
infLabels := createLabels(bucketStr, leStr, pInfStr)
sig := addSample(tsMap, infBucket, infLabels, metric.Type().String())
bucketBounds = append(bucketBounds, bucketBoundsData{sig: sig, bound: math.Inf(1)}) startTimestamp := pt.StartTimestamp()
addExemplars(tsMap, promExemplars, bucketBounds) if settings.ExportCreatedMetric && startTimestamp != 0 {
labels := createLabels(baseName+createdSuffix, baseLabels)
// add _created time series if needed c.addTimeSeriesIfNeeded(labels, startTimestamp, pt.Timestamp())
startTimestamp := pt.StartTimestamp() }
if settings.ExportCreatedMetric && startTimestamp != 0 {
labels := createLabels(createdSuffix)
addCreatedTimeSeriesIfNeeded(tsMap, labels, startTimestamp, pt.Timestamp(), metric.Type().String())
} }
} }
@ -415,162 +369,203 @@ func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp {
case pmetric.MetricTypeGauge: case pmetric.MetricTypeGauge:
dataPoints := metric.Gauge().DataPoints() dataPoints := metric.Gauge().DataPoints()
for x := 0; x < dataPoints.Len(); x++ { for x := 0; x < dataPoints.Len(); x++ {
ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) ts = max(ts, dataPoints.At(x).Timestamp())
} }
case pmetric.MetricTypeSum: case pmetric.MetricTypeSum:
dataPoints := metric.Sum().DataPoints() dataPoints := metric.Sum().DataPoints()
for x := 0; x < dataPoints.Len(); x++ { for x := 0; x < dataPoints.Len(); x++ {
ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) ts = max(ts, dataPoints.At(x).Timestamp())
} }
case pmetric.MetricTypeHistogram: case pmetric.MetricTypeHistogram:
dataPoints := metric.Histogram().DataPoints() dataPoints := metric.Histogram().DataPoints()
for x := 0; x < dataPoints.Len(); x++ { for x := 0; x < dataPoints.Len(); x++ {
ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) ts = max(ts, dataPoints.At(x).Timestamp())
} }
case pmetric.MetricTypeExponentialHistogram: case pmetric.MetricTypeExponentialHistogram:
dataPoints := metric.ExponentialHistogram().DataPoints() dataPoints := metric.ExponentialHistogram().DataPoints()
for x := 0; x < dataPoints.Len(); x++ { for x := 0; x < dataPoints.Len(); x++ {
ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) ts = max(ts, dataPoints.At(x).Timestamp())
} }
case pmetric.MetricTypeSummary: case pmetric.MetricTypeSummary:
dataPoints := metric.Summary().DataPoints() dataPoints := metric.Summary().DataPoints()
for x := 0; x < dataPoints.Len(); x++ { for x := 0; x < dataPoints.Len(); x++ {
ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) ts = max(ts, dataPoints.At(x).Timestamp())
} }
} }
return ts return ts
} }
func maxTimestamp(a, b pcommon.Timestamp) pcommon.Timestamp { func (c *PrometheusConverter) addSummaryDataPoints(dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource,
if a > b { settings Settings, baseName string) {
return a for x := 0; x < dataPoints.Len(); x++ {
} pt := dataPoints.At(x)
return b timestamp := convertTimeStamp(pt.Timestamp())
} baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false)
// addSingleSummaryDataPoint converts pt to len(QuantileValues) + 2 samples. // treat sum as a sample in an individual TimeSeries
func addSingleSummaryDataPoint(pt pmetric.SummaryDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings, sum := &prompb.Sample{
tsMap map[string]*prompb.TimeSeries, baseName string) { Value: pt.Sum(),
timestamp := convertTimeStamp(pt.Timestamp())
baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels)
createLabels := func(name string, extras ...string) []prompb.Label {
extraLabelCount := len(extras) / 2
labels := make([]prompb.Label, len(baseLabels), len(baseLabels)+extraLabelCount+1) // +1 for name
copy(labels, baseLabels)
for extrasIdx := 0; extrasIdx < extraLabelCount; extrasIdx++ {
labels = append(labels, prompb.Label{Name: extras[extrasIdx], Value: extras[extrasIdx+1]})
}
labels = append(labels, prompb.Label{Name: model.MetricNameLabel, Value: name})
return labels
}
// treat sum as a sample in an individual TimeSeries
sum := &prompb.Sample{
Value: pt.Sum(),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
sum.Value = math.Float64frombits(value.StaleNaN)
}
// sum and count of the summary should append suffix to baseName
sumlabels := createLabels(baseName + sumStr)
addSample(tsMap, sum, sumlabels, metric.Type().String())
// treat count as a sample in an individual TimeSeries
count := &prompb.Sample{
Value: float64(pt.Count()),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
count.Value = math.Float64frombits(value.StaleNaN)
}
countlabels := createLabels(baseName + countStr)
addSample(tsMap, count, countlabels, metric.Type().String())
// process each percentile/quantile
for i := 0; i < pt.QuantileValues().Len(); i++ {
qt := pt.QuantileValues().At(i)
quantile := &prompb.Sample{
Value: qt.Value(),
Timestamp: timestamp, Timestamp: timestamp,
} }
if pt.Flags().NoRecordedValue() { if pt.Flags().NoRecordedValue() {
quantile.Value = math.Float64frombits(value.StaleNaN) sum.Value = math.Float64frombits(value.StaleNaN)
} }
percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64) // sum and count of the summary should append suffix to baseName
qtlabels := createLabels(baseName, quantileStr, percentileStr) sumlabels := createLabels(baseName+sumStr, baseLabels)
addSample(tsMap, quantile, qtlabels, metric.Type().String()) c.addSample(sum, sumlabels)
}
// add _created time series if needed // treat count as a sample in an individual TimeSeries
startTimestamp := pt.StartTimestamp() count := &prompb.Sample{
if settings.ExportCreatedMetric && startTimestamp != 0 { Value: float64(pt.Count()),
createdLabels := createLabels(baseName + createdSuffix) Timestamp: timestamp,
addCreatedTimeSeriesIfNeeded(tsMap, createdLabels, startTimestamp, pt.Timestamp(), metric.Type().String()) }
if pt.Flags().NoRecordedValue() {
count.Value = math.Float64frombits(value.StaleNaN)
}
countlabels := createLabels(baseName+countStr, baseLabels)
c.addSample(count, countlabels)
// process each percentile/quantile
for i := 0; i < pt.QuantileValues().Len(); i++ {
qt := pt.QuantileValues().At(i)
quantile := &prompb.Sample{
Value: qt.Value(),
Timestamp: timestamp,
}
if pt.Flags().NoRecordedValue() {
quantile.Value = math.Float64frombits(value.StaleNaN)
}
percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64)
qtlabels := createLabels(baseName, baseLabels, quantileStr, percentileStr)
c.addSample(quantile, qtlabels)
}
startTimestamp := pt.StartTimestamp()
if settings.ExportCreatedMetric && startTimestamp != 0 {
createdLabels := createLabels(baseName+createdSuffix, baseLabels)
c.addTimeSeriesIfNeeded(createdLabels, startTimestamp, pt.Timestamp())
}
} }
} }
// addCreatedTimeSeriesIfNeeded adds {name}_created time series with a single // createLabels returns a copy of baseLabels, adding to it the pair model.MetricNameLabel=name.
// sample. If the series exists, then new samples won't be added. // If extras are provided, corresponding label pairs are also added to the returned slice.
func addCreatedTimeSeriesIfNeeded( // If extras is uneven length, the last (unpaired) extra will be ignored.
series map[string]*prompb.TimeSeries, func createLabels(name string, baseLabels []prompb.Label, extras ...string) []prompb.Label {
labels []prompb.Label, extraLabelCount := len(extras) / 2
startTimestamp pcommon.Timestamp, labels := make([]prompb.Label, len(baseLabels), len(baseLabels)+extraLabelCount+1) // +1 for name
timestamp pcommon.Timestamp, copy(labels, baseLabels)
metricType string,
) { n := len(extras)
sig := timeSeriesSignature(metricType, labels) n -= n % 2
if _, ok := series[sig]; !ok { for extrasIdx := 0; extrasIdx < n; extrasIdx += 2 {
series[sig] = &prompb.TimeSeries{ labels = append(labels, prompb.Label{Name: extras[extrasIdx], Value: extras[extrasIdx+1]})
Labels: labels, }
Samples: []prompb.Sample{
{ // convert ns to ms labels = append(labels, prompb.Label{Name: model.MetricNameLabel, Value: name})
Value: float64(convertTimeStamp(startTimestamp)), return labels
Timestamp: convertTimeStamp(timestamp), }
},
// getOrCreateTimeSeries returns the time series corresponding to the label set if existent, and false.
// Otherwise it creates a new one and returns that, and true.
func (c *PrometheusConverter) getOrCreateTimeSeries(lbls []prompb.Label) (*prompb.TimeSeries, bool) {
h := timeSeriesSignature(lbls)
ts := c.unique[h]
if ts != nil {
if isSameMetric(ts, lbls) {
// We already have this metric
return ts, false
}
// Look for a matching conflict
for _, cTS := range c.conflicts[h] {
if isSameMetric(cTS, lbls) {
// We already have this metric
return cTS, false
}
}
// New conflict
ts = &prompb.TimeSeries{
Labels: lbls,
}
c.conflicts[h] = append(c.conflicts[h], ts)
return ts, true
}
// This metric is new
ts = &prompb.TimeSeries{
Labels: lbls,
}
c.unique[h] = ts
return ts, true
}
// addTimeSeriesIfNeeded adds a corresponding time series if it doesn't already exist.
// If the time series doesn't already exist, it gets added with startTimestamp for its value and timestamp for its timestamp,
// both converted to milliseconds.
func (c *PrometheusConverter) addTimeSeriesIfNeeded(lbls []prompb.Label, startTimestamp pcommon.Timestamp, timestamp pcommon.Timestamp) {
ts, created := c.getOrCreateTimeSeries(lbls)
if created {
ts.Samples = []prompb.Sample{
{
// convert ns to ms
Value: float64(convertTimeStamp(startTimestamp)),
Timestamp: convertTimeStamp(timestamp),
}, },
} }
} }
} }
// addResourceTargetInfo converts the resource to the target info metric.
func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timestamp pcommon.Timestamp, converter *PrometheusConverter) {
	if settings.DisableTargetInfo || timestamp == 0 {
		return
	}

	attributes := resource.Attributes()
	identifyingAttrs := []string{
		conventions.AttributeServiceNamespace,
		conventions.AttributeServiceName,
		conventions.AttributeServiceInstanceID,
	}
	nonIdentifyingAttrsCount := attributes.Len()
	for _, a := range identifyingAttrs {
		_, haveAttr := attributes.Get(a)
		if haveAttr {
			nonIdentifyingAttrsCount--
		}
	}
	if nonIdentifyingAttrsCount == 0 {
		// If we only have job + instance, then target_info isn't useful, so don't add it.
		return
	}

	name := targetMetricName
	if len(settings.Namespace) > 0 {
		name = settings.Namespace + "_" + name
	}

	labels := createAttributes(resource, attributes, settings.ExternalLabels, identifyingAttrs, false, model.MetricNameLabel, name)
	haveIdentifier := false
	for _, l := range labels {
		if l.Name == model.JobLabel || l.Name == model.InstanceLabel {
			haveIdentifier = true
			break
		}
	}

	if !haveIdentifier {
		// We need at least one identifying label to generate target_info.
		return
	}

	sample := &prompb.Sample{
		Value: float64(1),
		// convert ns to ms
		Timestamp: convertTimeStamp(timestamp),
	}
	converter.addSample(sample, labels)
}

// convertTimeStamp converts OTLP timestamp in ns to timestamp in ms

View file

@ -1,58 +1,59 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/histograms.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.

package prometheusremotewrite

import (
	"fmt"
	"math"

	"github.com/prometheus/common/model"
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"

	"github.com/prometheus/prometheus/model/value"
	"github.com/prometheus/prometheus/prompb"
)

const defaultZeroThreshold = 1e-128

func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetric.ExponentialHistogramDataPointSlice,
	resource pcommon.Resource, settings Settings, baseName string) error {
	for x := 0; x < dataPoints.Len(); x++ {
		pt := dataPoints.At(x)
		lbls := createAttributes(
			resource,
			pt.Attributes(),
			settings.ExternalLabels,
			nil,
			true,
			model.MetricNameLabel,
			baseName,
		)
		ts, _ := c.getOrCreateTimeSeries(lbls)

		histogram, err := exponentialToNativeHistogram(pt)
		if err != nil {
			return err
		}
		ts.Histograms = append(ts.Histograms, histogram)

		exemplars := getPromExemplars[pmetric.ExponentialHistogramDataPoint](pt)
		ts.Exemplars = append(ts.Exemplars, exemplars...)
	}

	return nil
}

View file

@ -1,19 +1,31 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/metrics_to_prw.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.

package prometheusremotewrite

import (
	"errors"
	"fmt"
	"sort"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
	"go.uber.org/multierr"

	"github.com/prometheus/prometheus/prompb"
	prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
)

@ -26,10 +38,21 @@ type Settings struct {
	SendMetadata bool
}

// PrometheusConverter converts from OTel write format to Prometheus write format.
type PrometheusConverter struct {
	unique    map[uint64]*prompb.TimeSeries
	conflicts map[uint64][]*prompb.TimeSeries
}

func NewPrometheusConverter() *PrometheusConverter {
	return &PrometheusConverter{
		unique:    map[uint64]*prompb.TimeSeries{},
		conflicts: map[uint64][]*prompb.TimeSeries{},
	}
}

// FromMetrics converts pmetric.Metrics to Prometheus remote write format.
func (c *PrometheusConverter) FromMetrics(md pmetric.Metrics, settings Settings) (errs error) {
	resourceMetricsSlice := md.ResourceMetrics()
	for i := 0; i < resourceMetricsSlice.Len(); i++ {
		resourceMetrics := resourceMetricsSlice.At(i)

@ -39,13 +62,12 @@ func FromMetrics(md pmetric.Metrics, settings Settings) (tsMap map[string]*promp
		// use with the "target" info metric
		var mostRecentTimestamp pcommon.Timestamp
		for j := 0; j < scopeMetricsSlice.Len(); j++ {
			metricSlice := scopeMetricsSlice.At(j).Metrics()

			// TODO: decide if instrumentation library information should be exported as labels
			for k := 0; k < metricSlice.Len(); k++ {
				metric := metricSlice.At(k)
				mostRecentTimestamp = max(mostRecentTimestamp, mostRecentTimestampInMetric(metric))

				if !isValidAggregationTemporality(metric) {
					errs = multierr.Append(errs, fmt.Errorf("invalid temporality and type combination for metric %q", metric.Name()))

@ -54,65 +76,125 @@ func FromMetrics(md pmetric.Metrics, settings Settings) (tsMap map[string]*promp
				promName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes)

				// handle individual metrics based on type
				//exhaustive:enforce
				switch metric.Type() {
				case pmetric.MetricTypeGauge:
					dataPoints := metric.Gauge().DataPoints()
					if dataPoints.Len() == 0 {
						errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
						break
					}
					c.addGaugeNumberDataPoints(dataPoints, resource, settings, promName)
				case pmetric.MetricTypeSum:
					dataPoints := metric.Sum().DataPoints()
					if dataPoints.Len() == 0 {
						errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
						break
					}
					c.addSumNumberDataPoints(dataPoints, resource, metric, settings, promName)
				case pmetric.MetricTypeHistogram:
					dataPoints := metric.Histogram().DataPoints()
					if dataPoints.Len() == 0 {
						errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
						break
					}
					c.addHistogramDataPoints(dataPoints, resource, settings, promName)
				case pmetric.MetricTypeExponentialHistogram:
					dataPoints := metric.ExponentialHistogram().DataPoints()
					if dataPoints.Len() == 0 {
						errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
						break
					}
					errs = multierr.Append(errs, c.addExponentialHistogramDataPoints(
						dataPoints,
						resource,
						settings,
						promName,
					))
				case pmetric.MetricTypeSummary:
					dataPoints := metric.Summary().DataPoints()
					if dataPoints.Len() == 0 {
						errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
						break
					}
					c.addSummaryDataPoints(dataPoints, resource, settings, promName)
				default:
					errs = multierr.Append(errs, errors.New("unsupported metric type"))
				}
			}
		}
		addResourceTargetInfo(resource, settings, mostRecentTimestamp, c)
	}

	return
}
// TimeSeries returns a slice of the prompb.TimeSeries that were converted from OTel format.
func (c *PrometheusConverter) TimeSeries() []prompb.TimeSeries {
conflicts := 0
for _, ts := range c.conflicts {
conflicts += len(ts)
}
allTS := make([]prompb.TimeSeries, 0, len(c.unique)+conflicts)
for _, ts := range c.unique {
allTS = append(allTS, *ts)
}
for _, cTS := range c.conflicts {
for _, ts := range cTS {
allTS = append(allTS, *ts)
}
}
return allTS
}
func isSameMetric(ts *prompb.TimeSeries, lbls []prompb.Label) bool {
if len(ts.Labels) != len(lbls) {
return false
}
for i, l := range ts.Labels {
if l.Name != ts.Labels[i].Name || l.Value != ts.Labels[i].Value {
return false
}
}
return true
}
// addExemplars adds exemplars for the dataPoint. For each exemplar, if it can find a bucket bound corresponding to its value,
// the exemplar is added to the bucket bound's time series, provided that the time series' has samples.
func (c *PrometheusConverter) addExemplars(dataPoint pmetric.HistogramDataPoint, bucketBounds []bucketBoundsData) {
if len(bucketBounds) == 0 {
return
}
exemplars := getPromExemplars(dataPoint)
if len(exemplars) == 0 {
return
}
sort.Sort(byBucketBoundsData(bucketBounds))
for _, exemplar := range exemplars {
for _, bound := range bucketBounds {
if len(bound.ts.Samples) > 0 && exemplar.Value <= bound.bound {
bound.ts.Exemplars = append(bound.ts.Exemplars, exemplar)
break
}
}
}
}
// addSample finds a TimeSeries that corresponds to lbls, and adds sample to it.
// If there is no corresponding TimeSeries already, it's created.
// The corresponding TimeSeries is returned.
// If either lbls is nil/empty or sample is nil, nothing is done.
func (c *PrometheusConverter) addSample(sample *prompb.Sample, lbls []prompb.Label) *prompb.TimeSeries {
if sample == nil || len(lbls) == 0 {
// This shouldn't happen
return nil
}
ts, _ := c.getOrCreateTimeSeries(lbls)
ts.Samples = append(ts.Samples, *sample)
return ts
}
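
For orientation (this sketch is not part of the commit): the new converter API is used by constructing a PrometheusConverter, feeding it pmetric.Metrics via FromMetrics, and then collecting the accumulated series with TimeSeries. The helper name buildWriteRequest below is illustrative only; the real call site is the OTLP write handler diff further down.

func buildWriteRequest(md pmetric.Metrics) (*prompb.WriteRequest, error) {
	converter := NewPrometheusConverter()
	// FromMetrics accumulates series inside the converter and returns any
	// per-metric translation errors joined via multierr.
	err := converter.FromMetrics(md, Settings{AddMetricSuffixes: true})
	// TimeSeries flattens the unique series and any hash-conflict series
	// into one slice suitable for a remote-write request.
	return &prompb.WriteRequest{Timeseries: converter.TimeSeries()}, err
}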

View file

@ -0,0 +1,134 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/metrics_to_prw_test.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
package prometheusremotewrite
import (
"fmt"
"testing"
"time"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
)
func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) {
for _, resourceAttributeCount := range []int{0, 5, 50} {
b.Run(fmt.Sprintf("resource attribute count: %v", resourceAttributeCount), func(b *testing.B) {
for _, histogramCount := range []int{0, 1000} {
b.Run(fmt.Sprintf("histogram count: %v", histogramCount), func(b *testing.B) {
nonHistogramCounts := []int{0, 1000}
if resourceAttributeCount == 0 && histogramCount == 0 {
// Don't bother running a scenario where we'll generate no series.
nonHistogramCounts = []int{1000}
}
for _, nonHistogramCount := range nonHistogramCounts {
b.Run(fmt.Sprintf("non-histogram count: %v", nonHistogramCount), func(b *testing.B) {
for _, labelsPerMetric := range []int{2, 20} {
b.Run(fmt.Sprintf("labels per metric: %v", labelsPerMetric), func(b *testing.B) {
for _, exemplarsPerSeries := range []int{0, 5, 10} {
b.Run(fmt.Sprintf("exemplars per series: %v", exemplarsPerSeries), func(b *testing.B) {
payload := createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCount, labelsPerMetric, exemplarsPerSeries)
for i := 0; i < b.N; i++ {
converter := NewPrometheusConverter()
require.NoError(b, converter.FromMetrics(payload.Metrics(), Settings{}))
require.NotNil(b, converter.TimeSeries())
}
})
}
})
}
})
}
})
}
})
}
}
func createExportRequest(resourceAttributeCount int, histogramCount int, nonHistogramCount int, labelsPerMetric int, exemplarsPerSeries int) pmetricotlp.ExportRequest {
request := pmetricotlp.NewExportRequest()
rm := request.Metrics().ResourceMetrics().AppendEmpty()
generateAttributes(rm.Resource().Attributes(), "resource", resourceAttributeCount)
metrics := rm.ScopeMetrics().AppendEmpty().Metrics()
ts := pcommon.NewTimestampFromTime(time.Now())
for i := 1; i <= histogramCount; i++ {
m := metrics.AppendEmpty()
m.SetEmptyHistogram()
m.SetName(fmt.Sprintf("histogram-%v", i))
m.Histogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
h := m.Histogram().DataPoints().AppendEmpty()
h.SetTimestamp(ts)
// Set 50 samples, 10 each with values 0.5, 1, 2, 4, and 8
h.SetCount(50)
h.SetSum(155)
h.BucketCounts().FromRaw([]uint64{10, 10, 10, 10, 10, 0})
h.ExplicitBounds().FromRaw([]float64{.5, 1, 2, 4, 8, 16}) // Bucket boundaries include the upper limit (ie. each sample is on the upper limit of its bucket)
generateAttributes(h.Attributes(), "series", labelsPerMetric)
generateExemplars(h.Exemplars(), exemplarsPerSeries, ts)
}
for i := 1; i <= nonHistogramCount; i++ {
m := metrics.AppendEmpty()
m.SetEmptySum()
m.SetName(fmt.Sprintf("sum-%v", i))
m.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
point := m.Sum().DataPoints().AppendEmpty()
point.SetTimestamp(ts)
point.SetDoubleValue(1.23)
generateAttributes(point.Attributes(), "series", labelsPerMetric)
generateExemplars(point.Exemplars(), exemplarsPerSeries, ts)
}
for i := 1; i <= nonHistogramCount; i++ {
m := metrics.AppendEmpty()
m.SetEmptyGauge()
m.SetName(fmt.Sprintf("gauge-%v", i))
point := m.Gauge().DataPoints().AppendEmpty()
point.SetTimestamp(ts)
point.SetDoubleValue(1.23)
generateAttributes(point.Attributes(), "series", labelsPerMetric)
generateExemplars(point.Exemplars(), exemplarsPerSeries, ts)
}
return request
}
func generateAttributes(m pcommon.Map, prefix string, count int) {
for i := 1; i <= count; i++ {
m.PutStr(fmt.Sprintf("%v-name-%v", prefix, i), fmt.Sprintf("value-%v", i))
}
}
func generateExemplars(exemplars pmetric.ExemplarSlice, count int, ts pcommon.Timestamp) {
for i := 1; i <= count; i++ {
e := exemplars.AppendEmpty()
e.SetTimestamp(ts)
e.SetDoubleValue(2.22)
e.SetSpanID(pcommon.SpanID{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08})
e.SetTraceID(pcommon.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f})
}
}

View file

@ -1,106 +1,110 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/number_data_points.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.

package prometheusremotewrite

import (
	"math"

	"github.com/prometheus/common/model"
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"

	"github.com/prometheus/prometheus/model/value"
	"github.com/prometheus/prometheus/prompb"
)

func (c *PrometheusConverter) addGaugeNumberDataPoints(dataPoints pmetric.NumberDataPointSlice,
	resource pcommon.Resource, settings Settings, name string) {
	for x := 0; x < dataPoints.Len(); x++ {
		pt := dataPoints.At(x)
		labels := createAttributes(
			resource,
			pt.Attributes(),
			settings.ExternalLabels,
			nil,
			true,
			model.MetricNameLabel,
			name,
		)
		sample := &prompb.Sample{
			// convert ns to ms
			Timestamp: convertTimeStamp(pt.Timestamp()),
		}
		switch pt.ValueType() {
		case pmetric.NumberDataPointValueTypeInt:
			sample.Value = float64(pt.IntValue())
		case pmetric.NumberDataPointValueTypeDouble:
			sample.Value = pt.DoubleValue()
		}
		if pt.Flags().NoRecordedValue() {
			sample.Value = math.Float64frombits(value.StaleNaN)
		}
		c.addSample(sample, labels)
	}
}

func (c *PrometheusConverter) addSumNumberDataPoints(dataPoints pmetric.NumberDataPointSlice,
	resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string) {
	for x := 0; x < dataPoints.Len(); x++ {
		pt := dataPoints.At(x)
		lbls := createAttributes(
			resource,
			pt.Attributes(),
			settings.ExternalLabels,
			nil,
			true,
			model.MetricNameLabel,
			name,
		)
		sample := &prompb.Sample{
			// convert ns to ms
			Timestamp: convertTimeStamp(pt.Timestamp()),
		}
		switch pt.ValueType() {
		case pmetric.NumberDataPointValueTypeInt:
			sample.Value = float64(pt.IntValue())
		case pmetric.NumberDataPointValueTypeDouble:
			sample.Value = pt.DoubleValue()
		}
		if pt.Flags().NoRecordedValue() {
			sample.Value = math.Float64frombits(value.StaleNaN)
		}
		ts := c.addSample(sample, lbls)
		if ts != nil {
			exemplars := getPromExemplars[pmetric.NumberDataPoint](pt)
			ts.Exemplars = append(ts.Exemplars, exemplars...)
		}

		// add created time series if needed
		if settings.ExportCreatedMetric && metric.Sum().IsMonotonic() {
			startTimestamp := pt.StartTimestamp()
			if startTimestamp == 0 {
				return
			}

			createdLabels := make([]prompb.Label, len(lbls))
			copy(createdLabels, lbls)
			for i, l := range createdLabels {
				if l.Name == model.MetricNameLabel {
					createdLabels[i].Value = name + createdSuffix
					break
				}
			}
			c.addTimeSeriesIfNeeded(createdLabels, startTimestamp, pt.Timestamp())
		}
	}
}

View file

@ -1,14 +1,25 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/otlp_to_openmetrics_metadata.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.

package prometheusremotewrite

import (
	"go.opentelemetry.io/collector/pdata/pmetric"

	"github.com/prometheus/prometheus/prompb"
	prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
)

View file

@ -1,27 +0,0 @@
#!/bin/bash
set -xe
OTEL_VERSION=v0.95.0
git clone https://github.com/open-telemetry/opentelemetry-collector-contrib ./tmp
cd ./tmp
git checkout $OTEL_VERSION
cd ..
rm -rf ./prometheusremotewrite/*
cp -r ./tmp/pkg/translator/prometheusremotewrite/*.go ./prometheusremotewrite
rm -rf ./prometheusremotewrite/*_test.go
rm -rf ./prometheus/*
cp -r ./tmp/pkg/translator/prometheus/*.go ./prometheus
rm -rf ./prometheus/*_test.go
rm -rf ./tmp
case $(sed --help 2>&1) in
*GNU*) set sed -i;;
*) set sed -i '';;
esac
"$@" -e 's#github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus#github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus#g' ./prometheusremotewrite/*.go ./prometheus/*.go
"$@" -e '1s#^#// DO NOT EDIT. COPIED AS-IS. SEE ../README.md\n\n#g' ./prometheusremotewrite/*.go ./prometheus/*.go

View file

@ -208,21 +208,15 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
		return
	}

	converter := otlptranslator.NewPrometheusConverter()
	if err := converter.FromMetrics(req.Metrics(), otlptranslator.Settings{
		AddMetricSuffixes: true,
	}); err != nil {
		level.Warn(h.logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", err)
	}

	err = h.rwHandler.write(r.Context(), &prompb.WriteRequest{
		Timeseries: converter.TimeSeries(),
	})

	switch {

View file

@ -331,7 +331,7 @@ func postingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Matcher)
		return nil, err
	}

	res := vals[:0]
	for _, val := range vals {
		if m.Matches(val) {
			res = append(res, val)

@ -368,7 +368,7 @@ func inversePostingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Ma
		return nil, err
	}

	res := vals[:0]
	// If the inverse match is ="", we just want all the values.
	if m.Type == labels.MatchEqual && m.Value == "" {
		res = vals
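
Aside (illustrative, not part of the diff): the res := vals[:0] pattern above filters in place by reusing the backing array of vals, so matching values are kept without allocating a second slice; it relies on vals not being needed in its original form afterwards. A minimal sketch of the idiom:

func filterInPlace(vals []string, keep func(string) bool) []string {
	// Reuse vals' backing array; res and vals share storage.
	res := vals[:0]
	for _, v := range vals {
		if keep(v) {
			res = append(res, v)
		}
	}
	return res
}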

View file

@ -15,7 +15,6 @@ package v1
import (
	"context"
	"errors"
	"fmt"
	"io"

@ -35,6 +34,7 @@ import (
	"github.com/prometheus/prometheus/util/testutil"

	"github.com/go-kit/log"
	jsoniter "github.com/json-iterator/go"
	"github.com/prometheus/client_golang/prometheus"
	config_util "github.com/prometheus/common/config"
	"github.com/prometheus/common/model"

@ -910,6 +910,7 @@ func TestStats(t *testing.T) {
			require.IsType(t, &QueryData{}, i)
			qd := i.(*QueryData)
			require.NotNil(t, qd.Stats)
			json := jsoniter.ConfigCompatibleWithStandardLibrary
			j, err := json.Marshal(qd.Stats)
			require.NoError(t, err)
			require.JSONEq(t, `{"custom":"Custom Value"}`, string(j))

@ -1171,6 +1172,25 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
				},
			},
		},
// Test empty vector result
{
endpoint: api.query,
query: url.Values{
"query": []string{"bottomk(2, notExists)"},
},
responseAsJSON: `{"resultType":"vector","result":[]}`,
},
// Test empty matrix result
{
endpoint: api.queryRange,
query: url.Values{
"query": []string{"bottomk(2, notExists)"},
"start": []string{"0"},
"end": []string{"2"},
"step": []string{"1"},
},
responseAsJSON: `{"resultType":"matrix","result":[]}`,
},
		// Missing query params in range queries.
		{
			endpoint: api.queryRange,

@ -2891,10 +2911,13 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
			if test.zeroFunc != nil {
				test.zeroFunc(res.data)
			}
			if test.response != nil {
				assertAPIResponse(t, res.data, test.response)
			}
		}

		if test.responseAsJSON != "" {
			json := jsoniter.ConfigCompatibleWithStandardLibrary
			s, err := json.Marshal(res.data)
			require.NoError(t, err)
			require.JSONEq(t, test.responseAsJSON, string(s))

@ -3292,18 +3315,7 @@ func TestRespondError(t *testing.T) {
	require.Equal(t, want, have, "Return code %d expected in error response but got %d", want, have)
	h := resp.Header.Get("Content-Type")
	require.Equal(t, "application/json", h, "Expected Content-Type %q but got %q", "application/json", h)
	require.JSONEq(t, `{"status": "error", "data": "test", "errorType": "timeout", "error": "message"}`, string(body))
}

func TestParseTimeParam(t *testing.T) {

View file

@ -25,11 +25,13 @@ import (
)

func init() {
	jsoniter.RegisterTypeEncoderFunc("promql.Vector", unsafeMarshalVectorJSON, neverEmpty)
	jsoniter.RegisterTypeEncoderFunc("promql.Matrix", unsafeMarshalMatrixJSON, neverEmpty)
	jsoniter.RegisterTypeEncoderFunc("promql.Series", unsafeMarshalSeriesJSON, neverEmpty)
	jsoniter.RegisterTypeEncoderFunc("promql.Sample", unsafeMarshalSampleJSON, neverEmpty)
	jsoniter.RegisterTypeEncoderFunc("promql.FPoint", unsafeMarshalFPointJSON, neverEmpty)
	jsoniter.RegisterTypeEncoderFunc("promql.HPoint", unsafeMarshalHPointJSON, neverEmpty)
	jsoniter.RegisterTypeEncoderFunc("exemplar.Exemplar", marshalExemplarJSON, neverEmpty)
	jsoniter.RegisterTypeEncoderFunc("labels.Labels", unsafeMarshalLabelsJSON, labelsIsEmpty)
}

@ -66,8 +68,12 @@ func (j JSONCodec) Encode(resp *Response) ([]byte, error) {
//      < more histograms >
//   ],
// },
func unsafeMarshalSeriesJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
	s := *((*promql.Series)(ptr))
	marshalSeriesJSON(s, stream)
}

func marshalSeriesJSON(s promql.Series, stream *jsoniter.Stream) {
	stream.WriteObjectStart()
	stream.WriteObjectField(`metric`)
	marshalLabelsJSON(s.Metric, stream)

@ -78,7 +84,7 @@ func marshalSeriesJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
			stream.WriteObjectField(`values`)
			stream.WriteArrayStart()
		}
		marshalFPointJSON(p, stream)
	}
	if len(s.Floats) > 0 {
		stream.WriteArrayEnd()

@ -89,7 +95,7 @@ func marshalSeriesJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
			stream.WriteObjectField(`histograms`)
			stream.WriteArrayStart()
		}
		marshalHPointJSON(p, stream)
	}
	if len(s.Histograms) > 0 {
		stream.WriteArrayEnd()

@ -97,7 +103,8 @@ func marshalSeriesJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
	stream.WriteObjectEnd()
}

// In the Prometheus API we render an empty object as `[]` or similar.
func neverEmpty(unsafe.Pointer) bool {
	return false
}

@ -122,8 +129,12 @@ func marshalSeriesJSONIsEmpty(unsafe.Pointer) bool {
// },
// "histogram": [ 1435781451.781, { < histogram, see jsonutil.MarshalHistogram > } ]
// },
func unsafeMarshalSampleJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
	s := *((*promql.Sample)(ptr))
	marshalSampleJSON(s, stream)
}

func marshalSampleJSON(s promql.Sample, stream *jsoniter.Stream) {
	stream.WriteObjectStart()
	stream.WriteObjectField(`metric`)
	marshalLabelsJSON(s.Metric, stream)

@ -145,13 +156,13 @@ func marshalSampleJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
	stream.WriteObjectEnd()
}

// marshalFPointJSON writes `[ts, "1.234"]`.
func unsafeMarshalFPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
	p := *((*promql.FPoint)(ptr))
	marshalFPointJSON(p, stream)
}

func marshalFPointJSON(p promql.FPoint, stream *jsoniter.Stream) {
	stream.WriteArrayStart()
	jsonutil.MarshalTimestamp(p.T, stream)
	stream.WriteMore()

@ -160,8 +171,12 @@ func marshalFPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
}

// marshalHPointJSON writes `[ts, { < histogram, see jsonutil.MarshalHistogram > } ]`.
func unsafeMarshalHPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
	p := *((*promql.HPoint)(ptr))
	marshalHPointJSON(p, stream)
}

func marshalHPointJSON(p promql.HPoint, stream *jsoniter.Stream) {
	stream.WriteArrayStart()
	jsonutil.MarshalTimestamp(p.T, stream)
	stream.WriteMore()

@ -169,10 +184,6 @@ func marshalHPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
	stream.WriteArrayEnd()
}

// marshalExemplarJSON writes.
//
// {

@ -201,10 +212,6 @@ func marshalExemplarJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
	stream.WriteObjectEnd()
}

func unsafeMarshalLabelsJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
	labelsPtr := (*labels.Labels)(ptr)
	marshalLabelsJSON(*labelsPtr, stream)

@ -229,3 +236,29 @@ func labelsIsEmpty(ptr unsafe.Pointer) bool {
	labelsPtr := (*labels.Labels)(ptr)
	return labelsPtr.IsEmpty()
}
// Marshal a Vector as `[sample,sample,...]` - empty Vector is `[]`.
func unsafeMarshalVectorJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
v := *((*promql.Vector)(ptr))
stream.WriteArrayStart()
for i, s := range v {
marshalSampleJSON(s, stream)
if i != len(v)-1 {
stream.WriteMore()
}
}
stream.WriteArrayEnd()
}
// Marshal a Matrix as `[series,series,...]` - empty Matrix is `[]`.
func unsafeMarshalMatrixJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
m := *((*promql.Matrix)(ptr))
stream.WriteArrayStart()
for i, s := range m {
marshalSeriesJSON(s, stream)
if i != len(m)-1 {
stream.WriteMore()
}
}
stream.WriteArrayEnd()
}
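
Aside (illustrative sketch, not part of the commit): with Vector and Matrix now registered with their own encoders, an empty result is expected to serialize as an empty JSON array rather than null. The example below assumes the statusSuccess constant, Response and QueryData types from this package, and mirrors the responseAsJSON expectations added to the API tests above.

func ExampleJSONCodec_emptyVector() {
	codec := JSONCodec{}
	// Encode a successful query response whose vector result has no samples.
	b, _ := codec.Encode(&Response{
		Status: statusSuccess,
		Data:   &QueryData{ResultType: parser.ValueTypeVector, Result: promql.Vector{}},
	})
	fmt.Println(string(b))
	// Output: {"status":"success","data":{"resultType":"vector","result":[]}}
}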

View file

@ -29,6 +29,40 @@ func TestJsonCodec_Encode(t *testing.T) {
		response interface{}
		expected string
	}{
{
response: &QueryData{
ResultType: parser.ValueTypeVector,
Result: promql.Vector{
promql.Sample{
Metric: labels.FromStrings("__name__", "foo"),
T: 1000,
F: 1,
},
promql.Sample{
Metric: labels.FromStrings("__name__", "bar"),
T: 2000,
F: 2,
},
},
},
expected: `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"foo"},"value":[1,"1"]},{"metric":{"__name__":"bar"},"value":[2,"2"]}]}}`,
},
{
response: &QueryData{
ResultType: parser.ValueTypeMatrix,
Result: promql.Matrix{
promql.Series{
Metric: labels.FromStrings("__name__", "foo"),
Floats: []promql.FPoint{{F: 1, T: 1000}},
},
promql.Series{
Metric: labels.FromStrings("__name__", "bar"),
Floats: []promql.FPoint{{F: 2, T: 2000}},
},
},
},
expected: `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"foo"},"values":[[1,"1"]]},{"metric":{"__name__":"bar"},"values":[[2,"2"]]}]}}`,
},
		{
			response: &QueryData{
				ResultType: parser.ValueTypeMatrix,

View file

@ -1,6 +1,6 @@
{
  "name": "@prometheus-io/codemirror-promql",
  "version": "0.52.0-rc.1",
  "description": "a CodeMirror mode for the PromQL language",
  "types": "dist/esm/index.d.ts",
  "module": "dist/esm/index.js",

@ -29,7 +29,7 @@
  },
  "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
  "dependencies": {
    "@prometheus-io/lezer-promql": "0.52.0-rc.1",
    "lru-cache": "^7.18.3"
  },
  "devDependencies": {

View file

@ -251,6 +251,12 @@ describe('analyzeCompletion test', () => {
      pos: 11, // cursor is between the bracket after the string myL
      expectedContext: [{ kind: ContextKind.LabelName }],
    },
{
title: 'continue to autocomplete QuotedLabelName in aggregate modifier',
expr: 'sum by ("myL")',
pos: 12, // cursor is between the bracket after the string myL
expectedContext: [{ kind: ContextKind.LabelName }],
},
    {
      title: 'autocomplete labelName in a list',
      expr: 'sum by (myLabel1,)',
@ -263,6 +269,12 @@ describe('analyzeCompletion test', () => {
      pos: 23, // cursor is between the bracket after the string myLab
      expectedContext: [{ kind: ContextKind.LabelName }],
    },
{
title: 'autocomplete labelName in a list 2',
expr: 'sum by ("myLabel1", "myLab")',
pos: 27, // cursor is between the bracket after the string myLab
expectedContext: [{ kind: ContextKind.LabelName }],
},
    {
      title: 'autocomplete labelName associated to a metric',
      expr: 'metric_name{}',
@ -299,6 +311,12 @@ describe('analyzeCompletion test', () => {
      pos: 22, // cursor is between the bracket after the comma
      expectedContext: [{ kind: ContextKind.LabelName, metricName: '' }],
    },
{
title: 'continue to autocomplete quoted labelName associated to a metric',
expr: '{"metric_"}',
pos: 10, // cursor is between the bracket after the string metric_
expectedContext: [{ kind: ContextKind.MetricName, metricName: 'metric_' }],
},
    {
      title: 'autocomplete the labelValue with metricName + labelName',
      expr: 'metric_name{labelName=""}',
@ -342,6 +360,30 @@ describe('analyzeCompletion test', () => {
        },
      ],
    },
{
title: 'autocomplete the labelValue with metricName + quoted labelName',
expr: 'metric_name{labelName="labelValue", "labelName"!=""}',
pos: 50, // cursor is between the quotes
expectedContext: [
{
kind: ContextKind.LabelValue,
metricName: 'metric_name',
labelName: 'labelName',
matchers: [
{
name: 'labelName',
type: Neq,
value: '',
},
{
name: 'labelName',
type: EqlSingle,
value: 'labelValue',
},
],
},
],
},
    {
      title: 'autocomplete the labelValue associated to a labelName',
      expr: '{labelName=""}',
@ -427,6 +469,12 @@ describe('analyzeCompletion test', () => {
      pos: 22, // cursor is after '!'
      expectedContext: [{ kind: ContextKind.MatchOp }],
    },
{
title: 'autocomplete matchOp 3',
expr: 'metric_name{"labelName"!}',
pos: 24, // cursor is after '!'
expectedContext: [{ kind: ContextKind.BinOp }],
},
    {
      title: 'autocomplete duration with offset',
      expr: 'http_requests_total offset 5',

View file

@ -29,7 +29,6 @@ import {
  GroupingLabels,
  Gte,
  Gtr,
  LabelMatchers,
  LabelName,
  Lss,

@ -52,6 +51,9 @@ import {
  SubqueryExpr,
  Unless,
  VectorSelector,
  UnquotedLabelMatcher,
  QuotedLabelMatcher,
  QuotedLabelName,
} from '@prometheus-io/lezer-promql';
import { Completion, CompletionContext, CompletionResult } from '@codemirror/autocomplete';
import { EditorState } from '@codemirror/state';

@ -181,7 +183,10 @@ export function computeStartCompletePosition(node: SyntaxNode, pos: number): num
  let start = node.from;
  if (node.type.id === LabelMatchers || node.type.id === GroupingLabels) {
    start = computeStartCompleteLabelPositionInLabelMatcherOrInGroupingLabel(node, pos);
  } else if (
    node.type.id === FunctionCallBody ||
    (node.type.id === StringLiteral && (node.parent?.type.id === UnquotedLabelMatcher || node.parent?.type.id === QuotedLabelMatcher))
  ) {
    // When the cursor is between bracket, quote, we need to increment the starting position to avoid to consider the open bracket/ first string.
    start++;
  } else if (

@ -212,7 +217,7 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
      result.push({ kind: ContextKind.Duration });
      break;
    }
    if (node.parent?.type.id === UnquotedLabelMatcher || node.parent?.type.id === QuotedLabelMatcher) {
      // In this case the current token is not itself a valid match op yet:
      // metric_name{labelName!}
      result.push({ kind: ContextKind.MatchOp });

@ -380,7 +385,7 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
        // sum by (myL)
        // So we have to continue to autocomplete any kind of labelName
        result.push({ kind: ContextKind.LabelName });
      } else if (node.parent?.type.id === UnquotedLabelMatcher) {
        // In that case we are in the given situation:
        // metric_name{myL} or {myL}
        // so we have or to continue to autocomplete any kind of labelName or

@ -389,9 +394,9 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
      }
      break;
    case StringLiteral:
      if (node.parent?.type.id === UnquotedLabelMatcher || node.parent?.type.id === QuotedLabelMatcher) {
        // In this case we are in the given situation:
        // metric_name{labelName=""} or metric_name{"labelName"=""}
        // So we can autocomplete the labelValue

        // Get the labelName.

@ -399,18 +404,34 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
        let labelName = '';
        if (node.parent.firstChild?.type.id === LabelName) {
          labelName = state.sliceDoc(node.parent.firstChild.from, node.parent.firstChild.to);
        } else if (node.parent.firstChild?.type.id === QuotedLabelName) {
          labelName = state.sliceDoc(node.parent.firstChild.from, node.parent.firstChild.to).slice(1, -1);
        }
        // then find the metricName if it exists
        const metricName = getMetricNameInVectorSelector(node, state);
        // finally get the full matcher available
        const matcherNode = walkBackward(node, LabelMatchers);
        const labelMatcherOpts = [QuotedLabelName, QuotedLabelMatcher, UnquotedLabelMatcher];
        let labelMatchers: Matcher[] = [];
        for (const labelMatcherOpt of labelMatcherOpts) {
          labelMatchers = labelMatchers.concat(buildLabelMatchers(matcherNode ? matcherNode.getChildren(labelMatcherOpt) : [], state));
        }
        result.push({
          kind: ContextKind.LabelValue,
          metricName: metricName,
          labelName: labelName,
          matchers: labelMatchers,
        });
} else if (node.parent?.parent?.type.id === GroupingLabels) {
// In this case we are in the given situation:
// sum by ("myL")
// So we have to continue to autocomplete any kind of labelName
result.push({ kind: ContextKind.LabelName });
} else if (node.parent?.parent?.type.id === LabelMatchers) {
// In that case we are in the given situation:
// {""} or {"metric_"}
// since this is for the QuotedMetricName we need to continue to autocomplete for the metric names
result.push({ kind: ContextKind.MetricName, metricName: state.sliceDoc(node.from, node.to).slice(1, -1) });
      }
      break;
    case NumberLiteral:

View file

@ -12,33 +12,75 @@
// limitations under the License.

import { SyntaxNode } from '@lezer/common';
import {
  EqlRegex,
  EqlSingle,
  LabelName,
  MatchOp,
  Neq,
  NeqRegex,
  StringLiteral,
  UnquotedLabelMatcher,
  QuotedLabelMatcher,
  QuotedLabelName,
} from '@prometheus-io/lezer-promql';
import { EditorState } from '@codemirror/state';
import { Matcher } from '../types';

function createMatcher(labelMatcher: SyntaxNode, state: EditorState): Matcher {
  const matcher = new Matcher(0, '', '');
  const cursor = labelMatcher.cursor();
  switch (cursor.type.id) {
    case QuotedLabelMatcher:
      if (!cursor.next()) {
        // weird case, that would mean the QuotedLabelMatcher doesn't have any child.
        return matcher;
      }
      do {
        switch (cursor.type.id) {
          case QuotedLabelName:
            matcher.name = state.sliceDoc(cursor.from, cursor.to).slice(1, -1);
            break;
          case MatchOp:
            const ope = cursor.node.firstChild;
            if (ope) {
              matcher.type = ope.type.id;
            }
            break;
          case StringLiteral:
            matcher.value = state.sliceDoc(cursor.from, cursor.to).slice(1, -1);
            break;
        }
      } while (cursor.nextSibling());
      break;
    case UnquotedLabelMatcher:
      if (!cursor.next()) {
        // weird case, that would mean the UnquotedLabelMatcher doesn't have any child.
        return matcher;
      }
      do {
        switch (cursor.type.id) {
          case LabelName:
            matcher.name = state.sliceDoc(cursor.from, cursor.to);
            break;
          case MatchOp:
            const ope = cursor.node.firstChild;
            if (ope) {
              matcher.type = ope.type.id;
            }
            break;
          case StringLiteral:
            matcher.value = state.sliceDoc(cursor.from, cursor.to).slice(1, -1);
            break;
        }
      } while (cursor.nextSibling());
      break;
    case QuotedLabelName:
      matcher.name = '__name__';
      matcher.value = state.sliceDoc(cursor.from, cursor.to).slice(1, -1);
      matcher.type = EqlSingle;
      break;
  }
  return matcher;
}

View file

@ -204,6 +204,11 @@ describe('promql operations', () => {
      expectedValueType: ValueType.vector,
      expectedDiag: [] as Diagnostic[],
    },
{
expr: 'foo and on(test,"blub") bar',
expectedValueType: ValueType.vector,
expectedDiag: [] as Diagnostic[],
},
    {
      expr: 'foo and on() bar',
      expectedValueType: ValueType.vector,
@ -214,6 +219,11 @@ describe('promql operations', () => {
      expectedValueType: ValueType.vector,
      expectedDiag: [] as Diagnostic[],
    },
{
expr: 'foo and ignoring(test,"blub") bar',
expectedValueType: ValueType.vector,
expectedDiag: [] as Diagnostic[],
},
    {
      expr: 'foo and ignoring() bar',
      expectedValueType: ValueType.vector,
@ -229,6 +239,11 @@ describe('promql operations', () => {
      expectedValueType: ValueType.vector,
      expectedDiag: [] as Diagnostic[],
    },
{
expr: 'foo / on(test,blub) group_left("bar") bar',
expectedValueType: ValueType.vector,
expectedDiag: [] as Diagnostic[],
},
    {
      expr: 'foo / ignoring(test,blub) group_left(blub) bar',
      expectedValueType: ValueType.vector,
@ -825,6 +840,134 @@ describe('promql operations', () => {
      expectedValueType: ValueType.vector,
      expectedDiag: [],
    },
{
expr: '{"foo"}',
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
// with metric name in the middle
expr: '{a="b","foo",c~="d"}',
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
expr: '{"foo", a="bc"}',
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
expr: '{"colon:in:the:middle"}',
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
expr: '{"dot.in.the.middle"}',
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
expr: '{"😀 in metric name"}',
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
// quotes with escape
expr: '{"this is \"foo\" metric"}', // eslint-disable-line
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
expr: '{"foo","colon:in:the:middle"="val"}',
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
expr: '{"foo","dot.in.the.middle"="val"}',
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
expr: '{"foo","😀 in label name"="val"}',
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
// quotes with escape
expr: '{"foo","this is \"bar\" label"="val"}', // eslint-disable-line
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
expr: 'foo{"bar"}',
expectedValueType: ValueType.vector,
expectedDiag: [
{
from: 0,
message: 'metric name must not be set twice: foo or bar',
severity: 'error',
to: 10,
},
],
},
{
expr: '{"foo", __name__="bar"}',
expectedValueType: ValueType.vector,
expectedDiag: [
{
from: 0,
message: 'metric name must not be set twice: foo or bar',
severity: 'error',
to: 23,
},
],
},
{
expr: '{"foo", "__name__"="bar"}',
expectedValueType: ValueType.vector,
expectedDiag: [
{
from: 0,
message: 'metric name must not be set twice: foo or bar',
severity: 'error',
to: 25,
},
],
},
{
expr: '{"__name__"="foo", __name__="bar"}',
expectedValueType: ValueType.vector,
expectedDiag: [
{
from: 0,
message: 'metric name must not be set twice: foo or bar',
severity: 'error',
to: 34,
},
],
},
{
expr: '{"foo", "bar"}',
expectedValueType: ValueType.vector,
expectedDiag: [
{
from: 0,
to: 14,
message: 'metric name must not be set twice: foo or bar',
severity: 'error',
},
],
},
{
expr: `{'foo\`metric':'bar'}`, // eslint-disable-line
expectedValueType: ValueType.vector,
expectedDiag: [],
},
{
expr: '{`foo\"metric`=`bar`}', // eslint-disable-line
expectedValueType: ValueType.vector,
expectedDiag: [],
},
];
testCases.forEach((value) => {
const state = createEditorState(value.expr);

View file

@ -27,7 +27,6 @@ import {
Gte,
Gtr,
Identifier,
LabelMatcher,
LabelMatchers,
Lss,
Lte,
@ -36,11 +35,14 @@ import {
Or,
ParenExpr,
Quantile,
QuotedLabelMatcher,
QuotedLabelName,
StepInvariantExpr,
SubqueryExpr,
Topk,
UnaryExpr,
Unless,
UnquotedLabelMatcher,
VectorSelector,
} from '@prometheus-io/lezer-promql';
import { containsAtLeastOneChild } from './path-finder';
@ -282,7 +284,11 @@ export class Parser {
private checkVectorSelector(node: SyntaxNode): void {
const matchList = node.getChild(LabelMatchers);
const labelMatchers = buildLabelMatchers(matchList ? matchList.getChildren(LabelMatcher) : [], this.state); const labelMatcherOpts = [QuotedLabelName, QuotedLabelMatcher, UnquotedLabelMatcher];
let labelMatchers: Matcher[] = [];
for (const labelMatcherOpt of labelMatcherOpts) {
labelMatchers = labelMatchers.concat(buildLabelMatchers(matchList ? matchList.getChildren(labelMatcherOpt) : [], this.state));
}
let vectorSelectorName = '';
// VectorSelector ( Identifier )
// https://github.com/promlabs/lezer-promql/blob/71e2f9fa5ae6f5c5547d5738966cd2512e6b99a8/src/promql.grammar#L200
@ -301,6 +307,14 @@ export class Parser {
// adding the metric name as a Matcher to avoid a false positive for this kind of expression:
// foo{bare=''}
labelMatchers.push(new Matcher(EqlSingle, '__name__', vectorSelectorName));
} else {
// When the metric name is not set outside the braces,
// check whether it is set twice inside them, e.g.:
// {__name__="foo", "foo"}, {"foo", "bar"}
const labelMatchersMetricName = labelMatchers.filter((lm) => lm.name === '__name__');
if (labelMatchersMetricName.length > 1) {
this.addDiagnostic(node, `metric name must not be set twice: ${labelMatchersMetricName[0].value} or ${labelMatchersMetricName[1].value}`);
}
}
// A Vector selector must contain at least one non-empty matcher to prevent
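
As a small illustration of the duplicate-name check added above (a sketch; the matcher values and imports are assumptions, not taken from the diff):

// Hedged sketch: when both a quoted metric name and an explicit __name__ matcher
// are present, the filter finds two '__name__' entries and the diagnostic fires.
import { EqlSingle } from '@prometheus-io/lezer-promql';
import { Matcher } from '../types'; // assumed path to the Matcher class

const matchers = [new Matcher(EqlSingle, '__name__', 'foo'), new Matcher(EqlSingle, '__name__', 'bar')];
const metricNameMatchers = matchers.filter((lm) => lm.name === '__name__');
// metricNameMatchers.length === 2 → "metric name must not be set twice: foo or bar"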

View file

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/lezer-promql",
"version": "0.51.2", "version": "0.52.0-rc.1",
"description": "lezer-based PromQL grammar", "description": "lezer-based PromQL grammar",
"main": "dist/index.cjs", "main": "dist/index.cjs",
"type": "module", "type": "module",

View file

@ -97,7 +97,7 @@ binModifiers {
}
GroupingLabels {
"(" (LabelName ("," LabelName)* ","?)? ")" "(" ((LabelName | QuotedLabelName) ("," (LabelName | QuotedLabelName))* ","?)? ")"
}
FunctionCall {
@ -220,7 +220,7 @@ VectorSelector {
}
LabelMatchers {
"{" (LabelMatcher ("," LabelMatcher)* ","?)? "}" "{" ((UnquotedLabelMatcher | QuotedLabelMatcher | QuotedLabelName)("," (UnquotedLabelMatcher | QuotedLabelMatcher | QuotedLabelName))* ","?)? "}"
}
MatchOp {
@ -230,8 +230,16 @@ MatchOp {
NeqRegex
}
LabelMatcher { UnquotedLabelMatcher {
LabelName MatchOp StringLiteral
}
QuotedLabelMatcher {
QuotedLabelName MatchOp StringLiteral
}
QuotedLabelName {
StringLiteral
}
StepInvariantExpr {
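
A quick way to exercise the new rules once the parser is regenerated (a sketch; the expression is illustrative, not taken from the diff):

// Hedged sketch: parse a selector using the quoted forms and print the tree.
import { parser } from '@prometheus-io/lezer-promql';

const tree = parser.parse('{"metric_name", "foo"="bar"}');
// Expected shape per the rules above:
// VectorSelector(LabelMatchers(QuotedLabelName(StringLiteral), QuotedLabelMatcher(...)))
console.log(tree.toString());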

View file

@ -112,6 +112,54 @@ PromQL(
)
)
# Quoted label name in grouping labels
sum by("job", mode) (test_metric) / on("job") group_left sum by("job")(test_metric)
==>
PromQL(
BinaryExpr(
AggregateExpr(
AggregateOp(Sum),
AggregateModifier(
By,
GroupingLabels(
QuotedLabelName(StringLiteral),
LabelName
)
),
FunctionCallBody(
VectorSelector(
Identifier
)
)
),
Div,
MatchingModifierClause(
On,
GroupingLabels(
QuotedLabelName(StringLiteral)
)
GroupLeft
),
AggregateExpr(
AggregateOp(Sum),
AggregateModifier(
By,
GroupingLabels(
QuotedLabelName(StringLiteral)
)
),
FunctionCallBody(
VectorSelector(
Identifier
)
)
)
)
)
# Case insensitivity for aggregations and binop modifiers.
SuM BY(testlabel1) (testmetric1) / IGNOring(testlabel2) AVG withOUT(testlabel3) (testmetric2)
@ -226,25 +274,25 @@ PromQL(
VectorSelector(
Identifier,
LabelMatchers(
LabelMatcher( UnquotedLabelMatcher(
LabelName,
MatchOp(EqlSingle),
StringLiteral
),
LabelMatcher( UnquotedLabelMatcher(
LabelName,
MatchOp(Neq),
StringLiteral
),
LabelMatcher( UnquotedLabelMatcher(
LabelName,
MatchOp(EqlRegex),
StringLiteral
),
LabelMatcher( UnquotedLabelMatcher(
LabelName,
MatchOp(NeqRegex),
StringLiteral
)
)
)
@ -571,14 +619,14 @@ PromQL(NumberLiteral)
NaN{foo="bar"}
==>
PromQL(BinaryExpr(NumberLiteral,⚠,VectorSelector(LabelMatchers(LabelMatcher(LabelName,MatchOp(EqlSingle),StringLiteral))))) PromQL(BinaryExpr(NumberLiteral,⚠,VectorSelector(LabelMatchers(UnquotedLabelMatcher(LabelName,MatchOp(EqlSingle),StringLiteral)))))
# Trying to illegally use Inf as a metric name.
Inf{foo="bar"}
==>
PromQL(BinaryExpr(NumberLiteral,⚠,VectorSelector(LabelMatchers(LabelMatcher(LabelName,MatchOp(EqlSingle),StringLiteral))))) PromQL(BinaryExpr(NumberLiteral,⚠,VectorSelector(LabelMatchers(UnquotedLabelMatcher(LabelName,MatchOp(EqlSingle),StringLiteral)))))
# Negative offset
@ -614,3 +662,24 @@ MetricName(Identifier)
==>
PromQL(BinaryExpr(NumberLiteral,Add,BinaryExpr(VectorSelector(Identifier),Atan2,VectorSelector(Identifier))))
# Testing quoted metric name
{"metric_name"}
==>
PromQL(VectorSelector(LabelMatchers(QuotedLabelName(StringLiteral))))
# Testing quoted label name
{"foo"="bar"}
==>
PromQL(VectorSelector(LabelMatchers(QuotedLabelMatcher(QuotedLabelName(StringLiteral), MatchOp(EqlSingle), StringLiteral))))
# Testing quoted metric name and label name
{"metric_name", "foo"="bar"}
==>
PromQL(VectorSelector(LabelMatchers(QuotedLabelName(StringLiteral), QuotedLabelMatcher(QuotedLabelName(StringLiteral), MatchOp(EqlSingle), StringLiteral))))

View file

@ -1,12 +1,12 @@
{
"name": "prometheus-io",
"version": "0.51.2", "version": "0.52.0-rc.1",
"lockfileVersion": 3, "lockfileVersion": 3,
"requires": true, "requires": true,
"packages": { "packages": {
"": { "": {
"name": "prometheus-io", "name": "prometheus-io",
"version": "0.51.2", "version": "0.52.0-rc.1",
"workspaces": [ "workspaces": [
"react-app", "react-app",
"module/*" "module/*"
@ -30,10 +30,10 @@
},
"module/codemirror-promql": {
"name": "@prometheus-io/codemirror-promql",
"version": "0.51.2", "version": "0.52.0-rc.1",
"license": "Apache-2.0", "license": "Apache-2.0",
"dependencies": { "dependencies": {
"@prometheus-io/lezer-promql": "0.51.2", "@prometheus-io/lezer-promql": "0.52.0-rc.1",
"lru-cache": "^7.18.3" "lru-cache": "^7.18.3"
}, },
"devDependencies": { "devDependencies": {
@ -69,7 +69,7 @@
},
"module/lezer-promql": {
"name": "@prometheus-io/lezer-promql",
"version": "0.51.2", "version": "0.52.0-rc.1",
"license": "Apache-2.0", "license": "Apache-2.0",
"devDependencies": { "devDependencies": {
"@lezer/generator": "^1.5.1", "@lezer/generator": "^1.5.1",
@ -19233,7 +19233,7 @@
},
"react-app": {
"name": "@prometheus-io/app",
"version": "0.51.2", "version": "0.52.0-rc.1",
"dependencies": { "dependencies": {
"@codemirror/autocomplete": "^6.11.1", "@codemirror/autocomplete": "^6.11.1",
"@codemirror/commands": "^6.3.2", "@codemirror/commands": "^6.3.2",
@ -19251,7 +19251,7 @@
"@lezer/lr": "^1.3.14", "@lezer/lr": "^1.3.14",
"@nexucis/fuzzy": "^0.4.1", "@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1", "@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.51.2", "@prometheus-io/codemirror-promql": "0.52.0-rc.1",
"bootstrap": "^4.6.2", "bootstrap": "^4.6.2",
"css.escape": "^1.5.1", "css.escape": "^1.5.1",
"downshift": "^7.6.2", "downshift": "^7.6.2",

View file

@ -28,5 +28,5 @@
"ts-jest": "^29.1.1", "ts-jest": "^29.1.1",
"typescript": "^4.9.5" "typescript": "^4.9.5"
}, },
"version": "0.51.2" "version": "0.52.0-rc.1"
}

View file

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/app",
"version": "0.51.2", "version": "0.52.0-rc.1",
"private": true, "private": true,
"dependencies": { "dependencies": {
"@codemirror/autocomplete": "^6.11.1", "@codemirror/autocomplete": "^6.11.1",
@ -19,7 +19,7 @@
"@lezer/lr": "^1.3.14", "@lezer/lr": "^1.3.14",
"@nexucis/fuzzy": "^0.4.1", "@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1", "@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.51.2", "@prometheus-io/codemirror-promql": "0.52.0-rc.1",
"bootstrap": "^4.6.2", "bootstrap": "^4.6.2",
"css.escape": "^1.5.1", "css.escape": "^1.5.1",
"downshift": "^7.6.2", "downshift": "^7.6.2",