Merge branch 'master' into release-2.3

commit 5b596b97bc
Brian Brazil, 2018-07-12 16:44:11 +01:00 (committed by GitHub)
32 changed files with 1323 additions and 623 deletions


@@ -3,6 +3,8 @@ version: 2
 jobs:
   test:
+    # Whenever the Go version is updated here, .travis.yml should also be
+    # updated.
     docker:
     - image: circleci/golang:1.10
     working_directory: /go/src/github.com/prometheus/prometheus
@@ -32,10 +34,6 @@ jobs:
     - image: circleci/golang:1.10
     working_directory: /go/src/github.com/prometheus/prometheus
-    environment:
-      DOCKER_IMAGE_NAME: prom/prometheus
-      QUAY_IMAGE_NAME: quay.io/prometheus/prometheus
     steps:
     - checkout
     - setup_remote_docker
@@ -43,23 +41,19 @@ jobs:
         at: .
     - run: ln -s .build/linux-amd64/prometheus prometheus
     - run: ln -s .build/linux-amd64/promtool promtool
-    - run: make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME
-    - run: make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME
+    - run: make docker
+    - run: make docker DOCKER_REPO=quay.io/prometheus
    - run: docker images
     - run: docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD
     - run: docker login -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io
-    - run: docker push $DOCKER_IMAGE_NAME
-    - run: docker push $QUAY_IMAGE_NAME
+    - run: make docker-publish
+    - run: make docker-publish DOCKER_REPO=quay.io/prometheus

   docker_hub_release_tags:
     docker:
     - image: circleci/golang:1.10
     working_directory: /go/src/github.com/prometheus/prometheus
-    environment:
-      DOCKER_IMAGE_NAME: prom/prometheus
-      QUAY_IMAGE_NAME: quay.io/prometheus/prometheus
     steps:
     - checkout
     - setup_remote_docker
@@ -77,17 +71,17 @@ jobs:
         destination: releases
     - run: ln -s .build/linux-amd64/prometheus prometheus
     - run: ln -s .build/linux-amd64/promtool promtool
-    - run: make docker DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG
-    - run: make docker DOCKER_IMAGE_NAME=$QUAY_IMAGE_NAME DOCKER_IMAGE_TAG=$CIRCLE_TAG
+    - run: make docker DOCKER_IMAGE_TAG=$CIRCLE_TAG
+    - run: make docker DOCKER_IMAGE_TAG=$CIRCLE_TAG DOCKER_REPO=quay.io/prometheus
     - run: docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD
     - run: docker login -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io
     - run: |
         if [[ "$CIRCLE_TAG" =~ ^v[0-9]+(\.[0-9]+){2}$ ]]; then
-          docker tag "$DOCKER_IMAGE_NAME:$CIRCLE_TAG" "$DOCKER_IMAGE_NAME:latest"
-          docker tag "$QUAY_IMAGE_NAME:$CIRCLE_TAG" "$QUAY_IMAGE_NAME:latest"
+          make docker-tag-latest DOCKER_IMAGE_TAG="$CIRCLE_TAG"
+          make docker-tag-latest DOCKER_IMAGE_TAG="$CIRCLE_TAG" DOCKER_REPO=quay.io/prometheus
         fi
-    - run: docker push $DOCKER_IMAGE_NAME
-    - run: docker push $QUAY_IMAGE_NAME
+    - run: make docker-publish
+    - run: make docker-publish DOCKER_REPO=quay.io/prometheus
 workflows:
   version: 2


@@ -2,9 +2,10 @@ sudo: false
 language: go

+# Whenever the Go version is updated here, .circleci/config.yml should also be
+# updated.
 go:
 - 1.10.x
+- 1.x

 go_import_path: github.com/prometheus/prometheus


@@ -27,8 +27,9 @@ ifdef DEBUG
 bindata_flags = -debug
 endif

+.PHONY: assets
 assets:
 	@echo ">> writing assets"
 	@$(GO) get -u github.com/jteeuwen/go-bindata/...
 	@go-bindata $(bindata_flags) -pkg ui -o web/ui/bindata.go -ignore '(.*\.map|bootstrap\.js|bootstrap-theme\.css|bootstrap\.css)' web/ui/templates/... web/ui/static/...
 	@$(GO) fmt ./web/ui


@@ -36,13 +36,17 @@ pkgs = ./...
 PREFIX           ?= $(shell pwd)
 BIN_DIR          ?= $(shell pwd)
 DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
+DOCKER_REPO      ?= prom

+.PHONY: all
 all: style staticcheck unused build test

+.PHONY: style
 style:
 	@echo ">> checking code style"
 	! $(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print) | grep '^'

+.PHONY: check_license
 check_license:
 	@echo ">> checking license header"
 	@licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \
@@ -53,48 +57,66 @@ check_license:
 	       exit 1; \
 	fi

+.PHONY: test-short
 test-short:
 	@echo ">> running short tests"
 	$(GO) test -short $(pkgs)

+.PHONY: test
 test:
 	@echo ">> running all tests"
 	$(GO) test -race $(pkgs)

+.PHONY: format
 format:
 	@echo ">> formatting code"
 	$(GO) fmt $(pkgs)

+.PHONY: vet
 vet:
 	@echo ">> vetting code"
 	$(GO) vet $(pkgs)

+.PHONY: staticcheck
 staticcheck: $(STATICCHECK)
 	@echo ">> running staticcheck"
 	$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)

+.PHONY: unused
 unused: $(GOVENDOR)
 	@echo ">> running check for unused packages"
 	@$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages'

+.PHONY: build
 build: promu
 	@echo ">> building binaries"
 	$(PROMU) build --prefix $(PREFIX)

+.PHONY: tarball
 tarball: promu
 	@echo ">> building release tarball"
 	$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)

+.PHONY: docker
 docker:
-	docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .
+	docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .

+.PHONY: docker-publish
+docker-publish:
+	docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"

+.PHONY: docker-tag-latest
+docker-tag-latest:
+	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):latest"

+.PHONY: promu
 promu:
 	GOOS= GOARCH= $(GO) get -u github.com/prometheus/promu

-$(FIRST_GOPATH)/bin/staticcheck:
+.PHONY: $(STATICCHECK)
+$(STATICCHECK):
 	GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck

-$(FIRST_GOPATH)/bin/govendor:
+.PHONY: $(GOVENDOR)
+$(GOVENDOR):
 	GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor

-.PHONY: all style check_license format build test vet assets tarball docker promu staticcheck $(FIRST_GOPATH)/bin/staticcheck govendor $(FIRST_GOPATH)/bin/govendor
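Taken together, these Makefile changes replace the per-target image names with a single `DOCKER_REPO` prefix (defaulting to `prom`). As a rough sketch of how the new targets compose in a release build, based only on the targets and variables shown above (the tag value is illustrative):

```sh
# Build the image for both registries, tag the release as latest, and push.
make docker DOCKER_IMAGE_TAG=v2.3.0
make docker DOCKER_IMAGE_TAG=v2.3.0 DOCKER_REPO=quay.io/prometheus
make docker-tag-latest DOCKER_IMAGE_TAG=v2.3.0
make docker-publish
make docker-publish DOCKER_REPO=quay.io/prometheus
```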


@@ -2,9 +2,9 @@
 {{ template "prom_right_table_head" }}
 <tr>
-  <th colspan="2">CPU(s): {{ template "prom_query_drilldown" (args (printf "scalar(count(count by (cpu)(node_cpu{job='node',instance='%s'})))" .Params.instance)) }}</th>
+  <th colspan="2">CPU(s): {{ template "prom_query_drilldown" (args (printf "scalar(count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'})))" .Params.instance)) }}</th>
 </tr>
-{{ range printf "sum by (mode)(irate(node_cpu{job='node',instance='%s'}[5m])) * 100 / scalar(count(count by (cpu)(node_cpu{job='node',instance='%s'})))" .Params.instance .Params.instance | query | sortByLabel "mode" }}
+{{ range printf "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='%s'}[5m])) * 100 / scalar(count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'})))" .Params.instance .Params.instance | query | sortByLabel "mode" }}
 <tr>
   <td>{{ .Labels.mode | title }} CPU</td>
   <td>{{ .Value | printf "%.1f" }}%</td>
@@ -21,15 +21,15 @@
 </tr>
 <tr>
   <td>Forks</td>
-  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_forks{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }}</td>
+  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_forks_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }}</td>
 </tr>
 <tr>
   <td>Context Switches</td>
-  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_context_switches{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }}</td>
+  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_context_switches_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }}</td>
 </tr>
 <tr>
   <td>Interrupts</td>
-  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_intr{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }}</td>
+  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_intr_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }}</td>
 </tr>
 <tr>
   <td>1m Loadavg</td>
@@ -47,9 +47,9 @@
 <script>
 new PromConsole.Graph({
   node: document.querySelector("#cpuGraph"),
-  expr: "sum by (mode)(irate(node_cpu{job='node',instance='{{ .Params.instance }}',mode!='idle'}[5m]))",
+  expr: "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='{{ .Params.instance }}',mode!='idle'}[5m]))",
   renderer: 'area',
-  max: {{ with printf "count(count by (cpu)(node_cpu{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else}}undefined{{end}},
+  max: {{ with printf "count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else}}undefined{{end}},
   yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
   yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
   yTitle: 'Cores'
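These console updates track the metric renames in node_exporter 0.16 (`node_cpu` becomes `node_cpu_seconds_total`, with `_total`/`_bytes` unit suffixes elsewhere). For example, the per-mode CPU percentage drilldown above now evaluates an expression of this shape (instance value is illustrative):

```
sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='myhost:9100'}[5m]))
  * 100 / scalar(count(count by (cpu)(node_cpu_seconds_total{job='node',instance='myhost:9100'})))
```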


@@ -9,7 +9,7 @@
 new PromConsole.Graph({
   node: document.querySelector("#diskioGraph"),
   expr: [
-    "irate(node_disk_io_time_ms{job='node',instance='{{ .Params.instance }}',device!~'^(md\\\\d+$|dm-)'}[5m]) / 1000 * 100",
+    "irate(node_disk_io_time_seconds_total{job='node',instance='{{ .Params.instance }}',device!~'^(md\\\\d+$|dm-)'}[5m]) * 100",
   ],
   min: 0,
   name: '[[ device ]]',
@@ -24,7 +24,7 @@
 <script>
 new PromConsole.Graph({
   node: document.querySelector("#fsGraph"),
-  expr: "100 - node_filesystem_free{job='node',instance='{{ .Params.instance }}'} / node_filesystem_size{job='node'} * 100",
+  expr: "100 - node_filesystem_avail_bytes{job='node',instance='{{ .Params.instance }}'} / node_filesystem_size_bytes{job='node'} * 100",
   min: 0,
   max: 100,
   name: '[[ mountpoint ]]',
@@ -38,23 +38,23 @@
 {{ template "prom_right_table_head" }}
   <th colspan="2">Disks</th>
 </tr>
-{{ range printf "node_disk_io_time_ms{job='node',instance='%s'}" .Params.instance | query | sortByLabel "device" }}
+{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s'}" .Params.instance | query | sortByLabel "device" }}
 <th colspan="2">{{ .Labels.device }}</th>
 <tr>
   <td>Utilization</td>
-  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_io_time_ms{job='node',instance='%s',device='%s'}[5m]) / 1000 * 100" .Labels.instance .Labels.device) "%" "printf.1f") }}</td>
+  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_io_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) * 100" .Labels.instance .Labels.device) "%" "printf.1f") }}</td>
 </tr>
 <tr>
   <td>Throughput</td>
-  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_sectors_read{job='node',instance='%s',device='%s'}[5m]) * 512 + irate(node_disk_sectors_written{job='node',instance='%s',device='%s'}[5m]) * 512" .Labels.instance .Labels.device .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
+  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_bytes_total{job='node',instance='%s',device='%s'}[5m]) + irate(node_disk_written_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
 </tr>
 <tr>
   <td>Avg Read Time</td>
-  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_time_ms{job='node',instance='%s',device='%s'}[5m]) / 1000 / irate(node_disk_reads_completed{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "s" "humanize") }}</td>
+  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) / irate(node_disk_reads_completed_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "s" "humanize") }}</td>
 </tr>
 <tr>
   <td>Avg Write Time</td>
-  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_write_time_ms{job='node',instance='%s',device='%s'}[5m]) / 1000 / irate(node_disk_writes_completed{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "s" "humanize") }}</td>
+  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_write_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) / irate(node_disk_writes_completed_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "s" "humanize") }}</td>
 </tr>
 {{ end }}
 <th colspan="2">Filesystem Fullness</th>
@@ -62,10 +62,10 @@
 {{ define "roughlyNearZero" }}
 {{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }}
 {{ end }}
-{{ range printf "node_filesystem_size{job='node',instance='%s'}" .Params.instance | query | sortByLabel "mountpoint" }}
+{{ range printf "node_filesystem_size_bytes{job='node',instance='%s'}" .Params.instance | query | sortByLabel "mountpoint" }}
 <tr>
   <td>{{ .Labels.mountpoint }}</td>
-  <td>{{ template "prom_query_drilldown" (args (printf "100 - node_filesystem_free{job='node',instance='%s',mountpoint='%s'} / node_filesystem_size{job='node'} * 100" .Labels.instance .Labels.mountpoint) "%" "roughlyNearZero") }}</td>
+  <td>{{ template "prom_query_drilldown" (args (printf "100 - node_filesystem_avail_bytes{job='node',instance='%s',mountpoint='%s'} / node_filesystem_size_bytes{job='node'} * 100" .Labels.instance .Labels.mountpoint) "%" "roughlyNearZero") }}</td>
 </tr>
 {{ end }}
 <tr>


@@ -8,9 +8,9 @@
 <script>
 new PromConsole.Graph({
   node: document.querySelector("#cpuGraph"),
-  expr: "sum by (mode)(irate(node_cpu{job='node',instance='{{ .Params.instance }}',mode!='idle'}[5m]))",
+  expr: "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='{{ .Params.instance }}',mode!='idle'}[5m]))",
   renderer: 'area',
-  max: {{ with printf "count(count by (cpu)(node_cpu{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else}}undefined{{end}},
+  max: {{ with printf "count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else}}undefined{{end}},
   yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
   yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
   yTitle: 'Cores'
@@ -23,7 +23,7 @@
 new PromConsole.Graph({
   node: document.querySelector("#diskioGraph"),
   expr: [
-    "irate(node_disk_io_time_ms{job='node',instance='{{ .Params.instance }}',device!~'^(md\\\\d+$|dm-)'}[5m]) / 1000 * 100",
+    "irate(node_disk_io_time_seconds_total{job='node',instance='{{ .Params.instance }}',device!~'^(md\\\\d+$|dm-)'}[5m]) * 100",
   ],
   min: 0,
   name: '[[ device ]]',
@@ -41,9 +41,9 @@
   node: document.querySelector("#memoryGraph"),
   renderer: 'area',
   expr: [
-    "node_memory_Cached{job='node',instance='{{ .Params.instance }}'}",
-    "node_memory_Buffers{job='node',instance='{{ .Params.instance }}'}",
-    "node_memory_MemTotal{job='node',instance='{{ .Params.instance }}'} - node_memory_MemFree{job='node',instance='{{.Params.instance}}'} - node_memory_Buffers{job='node',instance='{{.Params.instance}}'} - node_memory_Cached{job='node',instance='{{.Params.instance}}'}",
+    "node_memory_Cached_bytes{job='node',instance='{{ .Params.instance }}'}",
+    "node_memory_Buffers_bytes{job='node',instance='{{ .Params.instance }}'}",
+    "node_memory_MemTotal_bytes{job='node',instance='{{ .Params.instance }}'} - node_memory_MemFree_bytes{job='node',instance='{{.Params.instance}}'} - node_memory_Buffers_bytes{job='node',instance='{{.Params.instance}}'} - node_memory_Cached_bytes{job='node',instance='{{.Params.instance}}'}",
     "node_memory_MemFree{job='node',instance='{{ .Params.instance }}'}",
   ],
   name: ["Cached", "Buffers", "Used", "Free"],
@@ -59,47 +59,47 @@
 <tr><th colspan="2">Overview</th></tr>
 <tr>
   <td>User CPU</td>
-  <td>{{ template "prom_query_drilldown" (args (printf "sum(irate(node_cpu{job='node',instance='%s',mode='user'}[5m])) * 100 / count(count by (cpu)(node_cpu{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }}</td>
+  <td>{{ template "prom_query_drilldown" (args (printf "sum(irate(node_cpu_seconds_total{job='node',instance='%s',mode='user'}[5m])) * 100 / count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }}</td>
 </tr>
 <tr>
   <td>System CPU</td>
-  <td>{{ template "prom_query_drilldown" (args (printf "sum(irate(node_cpu{job='node',instance='%s',mode='system'}[5m])) * 100 / count(count by (cpu)(node_cpu{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }}</td>
+  <td>{{ template "prom_query_drilldown" (args (printf "sum(irate(node_cpu_seconds_total{job='node',instance='%s',mode='system'}[5m])) * 100 / count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }}</td>
 </tr>
 <tr>
   <td>Memory Total</td>
-  <td>{{ template "prom_query_drilldown" (args (printf "node_memory_MemTotal{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
+  <td>{{ template "prom_query_drilldown" (args (printf "node_memory_MemTotal_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
 </tr>
 <tr>
   <td>Memory Free</td>
-  <td>{{ template "prom_query_drilldown" (args (printf "node_memory_MemFree{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
+  <td>{{ template "prom_query_drilldown" (args (printf "node_memory_MemFree_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
 </tr>
 <tr>
   <th colspan="2">Network</th>
 </tr>
-{{ range printf "node_network_receive_bytes{job='node',instance='%s',device!='lo'}" .Params.instance | query | sortByLabel "device" }}
+{{ range printf "node_network_receive_bytes_total{job='node',instance='%s',device!='lo'}" .Params.instance | query | sortByLabel "device" }}
 <tr>
   <td>{{ .Labels.device }} Received</td>
-  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_network_receive_bytes{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
+  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_network_receive_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
 </tr>
 <tr>
   <td>{{ .Labels.device }} Transmitted</td>
-  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_network_transmit_bytes{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
+  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_network_transmit_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
 </tr>
 {{ end }}
 </tr>
 <tr>
   <th colspan="2">Disks</th>
 </tr>
-{{ range printf "node_disk_io_time_ms{job='node',instance='%s',device!~'^(md\\\\d+$|dm-)'}" .Params.instance | query | sortByLabel "device" }}
+{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s',device!~'^(md\\\\d+$|dm-)'}" .Params.instance | query | sortByLabel "device" }}
 <tr>
   <td>{{ .Labels.device }} Utilization</td>
-  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_io_time_ms{job='node',instance='%s',device='%s'}[5m]) / 1000 * 100" .Labels.instance .Labels.device) "%" "printf.1f") }}</td>
+  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_io_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) * 100" .Labels.instance .Labels.device) "%" "printf.1f") }}</td>
 </tr>
 {{ end }}
-{{ range printf "node_disk_io_time_ms{job='node',instance='%s'}" .Params.instance | query | sortByLabel "device" }}
+{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s'}" .Params.instance | query | sortByLabel "device" }}
 <tr>
   <td>{{ .Labels.device }} Throughput</td>
-  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_sectors_read{job='node',instance='%s',device='%s'}[5m]) * 512 + irate(node_disk_sectors_written{job='node',instance='%s',device='%s'}[5m]) * 512" .Labels.instance .Labels.device .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
+  <td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_bytes_total{job='node',instance='%s',device='%s'}[5m]) + irate(node_disk_written_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
 </tr>
 {{ end }}
 <tr>
@@ -108,10 +108,10 @@
 {{ define "roughlyNearZero" }}
 {{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }}
 {{ end }}
-{{ range printf "node_filesystem_size{job='node',instance='%s'}" .Params.instance | query | sortByLabel "mountpoint" }}
+{{ range printf "node_filesystem_size_bytes{job='node',instance='%s'}" .Params.instance | query | sortByLabel "mountpoint" }}
 <tr>
   <td>{{ .Labels.mountpoint }}</td>
-  <td>{{ template "prom_query_drilldown" (args (printf "100 - node_filesystem_free{job='node',instance='%s',mountpoint='%s'} / node_filesystem_size{job='node'} * 100" .Labels.instance .Labels.mountpoint) "%" "roughlyNearZero") }}</td>
+  <td>{{ template "prom_query_drilldown" (args (printf "100 - node_filesystem_avail_bytes{job='node',instance='%s',mountpoint='%s'} / node_filesystem_size_bytes{job='node'} * 100" .Labels.instance .Labels.mountpoint) "%" "roughlyNearZero") }}</td>
 </tr>
 {{ end }}
 </tr>


@@ -21,8 +21,8 @@
 <tr>
   <td><a href="node-overview.html?instance={{ .Labels.instance }}">{{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Labels.instance }}</a></td>
   <td{{ if eq (. | value) 1.0 }}>Yes{{ else }} class="alert-danger">No{{ end }}</td>
-  <td>{{ template "prom_query_drilldown" (args (printf "100 * (1 - avg by(instance)(irate(node_cpu{job='node',mode='idle',instance='%s'}[5m])))" .Labels.instance) "%" "printf.1f") }}</td>
-  <td>{{ template "prom_query_drilldown" (args (printf "node_memory_MemFree{job='node',instance='%s'} + node_memory_Cached{job='node',instance='%s'} + node_memory_Buffers{job='node',instance='%s'}" .Labels.instance .Labels.instance .Labels.instance) "B" "humanize1024") }}</td>
+  <td>{{ template "prom_query_drilldown" (args (printf "100 * (1 - avg by(instance)(irate(node_cpu_seconds_total{job='node',mode='idle',instance='%s'}[5m])))" .Labels.instance) "%" "printf.1f") }}</td>
+  <td>{{ template "prom_query_drilldown" (args (printf "node_memory_MemFree_bytes{job='node',instance='%s'} + node_memory_Cached_bytes{job='node',instance='%s'} + node_memory_Buffers_bytes{job='node',instance='%s'}" .Labels.instance .Labels.instance .Labels.instance) "B" "humanize1024") }}</td>
 </tr>
 {{ else }}
 <tr><td colspan=4>No nodes found.</td></tr>


@@ -108,7 +108,7 @@ type SDConfig struct {
 	// See https://www.consul.io/api/catalog.html#list-services
 	// The list of services for which targets are discovered.
 	// Defaults to all services if empty.
-	Services []string `yaml:"services"`
+	Services []string `yaml:"services,omitempty"`
 	// An optional tag used to filter instances inside a service. A single tag is supported
 	// here to match the Consul API.
 	ServiceTag string `yaml:"tag,omitempty"`


@@ -186,16 +186,16 @@ func (s *Ingress) buildIngress(ingress *v1beta1.Ingress) *targetgroup.Group {
 	for _, rule := range ingress.Spec.Rules {
 		paths := pathsFromIngressRule(&rule.IngressRuleValue)

-		schema := "http"
+		scheme := "http"
 		_, isTLS := tlsHosts[rule.Host]
 		if isTLS {
-			schema = "https"
+			scheme = "https"
 		}

 		for _, path := range paths {
 			tg.Targets = append(tg.Targets, model.LabelSet{
 				model.AddressLabel: lv(rule.Host),
-				ingressSchemeLabel: lv(schema),
+				ingressSchemeLabel: lv(scheme),
 				ingressHostLabel:   lv(rule.Host),
 				ingressPathLabel:   lv(path),
 			})


@@ -84,13 +84,13 @@ func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
 // SDConfig is the configuration for Kubernetes service discovery.
 type SDConfig struct {
-	APIServer          config_util.URL        `yaml:"api_server"`
+	APIServer          config_util.URL        `yaml:"api_server,omitempty"`
 	Role               Role                   `yaml:"role"`
 	BasicAuth          *config_util.BasicAuth `yaml:"basic_auth,omitempty"`
 	BearerToken        config_util.Secret     `yaml:"bearer_token,omitempty"`
 	BearerTokenFile    string                 `yaml:"bearer_token_file,omitempty"`
 	TLSConfig          config_util.TLSConfig  `yaml:"tls_config,omitempty"`
-	NamespaceDiscovery NamespaceDiscovery     `yaml:"namespaces"`
+	NamespaceDiscovery NamespaceDiscovery     `yaml:"namespaces,omitempty"`
 }

 // UnmarshalYAML implements the yaml.Unmarshaler interface.
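Marking these fields `omitempty` keeps unset values out of marshalled output. A minimal, self-contained sketch of the effect, using simplified stand-in types rather than the real `SDConfig`:

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// Stand-in struct; only the yaml tags matter for this illustration.
type sdConfig struct {
	APIServer string   `yaml:"api_server,omitempty"`
	Role      string   `yaml:"role"`
	Names     []string `yaml:"namespaces,omitempty"`
}

func main() {
	out, _ := yaml.Marshal(sdConfig{Role: "pod"})
	fmt.Print(string(out)) // prints only "role: pod"; empty fields are omitted
}
```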


@@ -18,7 +18,7 @@ alert: InstanceDown
   expr: up == 0
   for: 5m
   labels:
-    - severity: page
+    severity: page
   annotations:
     summary: "Instance {{$labels.instance}} down"
     description: "{{$labels.instance}} of job {{$labels.job}} has been down for more than 5 minutes."
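With the stray list dash removed, the example parses as a label mapping rather than a YAML sequence; the corrected rule (reassembled from the lines above) reads:

```yaml
alert: InstanceDown
expr: up == 0
for: 5m
labels:
  severity: page
annotations:
  summary: "Instance {{$labels.instance}} down"
  description: "{{$labels.instance}} of job {{$labels.job}} has been down for more than 5 minutes."
```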


@@ -132,7 +132,7 @@ URL query parameters:
 - `query=<string>`: Prometheus expression query string.
 - `start=<rfc3339 | unix_timestamp>`: Start timestamp.
 - `end=<rfc3339 | unix_timestamp>`: End timestamp.
-- `step=<duration>`: Query resolution step width.
+- `step=<duration | float>`: Query resolution step width in `duration` format or float number of seconds.
 - `timeout=<duration>`: Evaluation timeout. Optional. Defaults to and
   is capped by the value of the `-query.timeout` flag.
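A range query can therefore express its resolution either way; for instance, against the range-query endpoint these parameters belong to (timestamps illustrative):

```
curl 'http://localhost:9090/api/v1/query_range?query=up&start=1531401600&end=1531402200&step=30'
curl 'http://localhost:9090/api/v1/query_range?query=up&start=1531401600&end=1531402200&step=7.5'
```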
@@ -329,7 +329,7 @@ Both the active and dropped targets are part of the response.

 ```json
 $ curl http://localhost:9090/api/v1/targets
 {
   "status": "success",
   "data": {
     "activeTargets": [
       {
@@ -363,6 +363,88 @@ $ curl http://localhost:9090/api/v1/targets
 }
 ```
+## Querying target metadata
+
+The following endpoint returns metadata about metrics currently scraped by targets.
+This is **experimental** and might change in the future.
+
+```
+GET /api/v1/targets/metadata
+```
+
+URL query parameters:
+
+- `match_target=<label_selectors>`: Label selectors that match targets by their label sets. All targets are selected if left empty.
+- `metric=<string>`: A metric name to retrieve metadata for. All metric metadata is retrieved if left empty.
+- `limit=<number>`: Maximum number of targets to match.
+
+The `data` section of the query result consists of a list of objects that
+contain metric metadata and the target label set.
+
+The following example returns all metadata entries for the `go_goroutines` metric
+from the first two targets with label `job="prometheus"`.
+
+```json
+curl -G http://localhost:9091/api/v1/targets/metadata \
+    --data-urlencode 'metric=go_goroutines' \
+    --data-urlencode 'match_target={job="prometheus"}' \
+    --data-urlencode 'limit=2'
+{
+  "status": "success",
+  "data": [
+    {
+      "target": {
+        "instance": "127.0.0.1:9090",
+        "job": "prometheus"
+      },
+      "type": "gauge",
+      "help": "Number of goroutines that currently exist."
+    },
+    {
+      "target": {
+        "instance": "127.0.0.1:9091",
+        "job": "prometheus"
+      },
+      "type": "gauge",
+      "help": "Number of goroutines that currently exist."
+    }
+  ]
+}
+```
+
+The following example returns metadata for all metrics for all targets with
+label `instance="127.0.0.1:9090"`.
+
+```json
+curl -G http://localhost:9091/api/v1/targets/metadata \
+    --data-urlencode 'match_target={instance="127.0.0.1:9090"}'
+{
+  "status": "success",
+  "data": [
+    // ...
+    {
+      "target": {
+        "instance": "127.0.0.1:9090",
+        "job": "prometheus"
+      },
+      "metric": "prometheus_treecache_zookeeper_failures_total",
+      "type": "counter",
+      "help": "The total number of ZooKeeper failures."
+    },
+    {
+      "target": {
+        "instance": "127.0.0.1:9090",
+        "job": "prometheus"
+      },
+      "metric": "prometheus_tsdb_reloads_total",
+      "type": "counter",
+      "help": "Number of times the database reloaded block data from disk."
+    },
+    // ...
+  ]
+}
+```
 ## Alertmanagers

 The following endpoint returns an overview of the current state of the


@@ -36,7 +36,7 @@ import (
 var (
 	a             = kingpin.New("sd adapter usage", "Tool to generate file_sd target files for unimplemented SD mechanisms.")
 	outputFile    = a.Flag("output.file", "Output file for file_sd compatible file.").Default("custom_sd.json").String()
-	listenAddress = a.Flag("listen.address", "The address the HTTP sd is listening on for requests.").Default("localhost:8080").String()
+	listenAddress = a.Flag("listen.address", "The address the Consul HTTP API is listening on for requests.").Default("localhost:8500").String()
 	logger        log.Logger

 	// addressLabel is the name for the label containing a target's address.
@@ -196,7 +196,7 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 		}
 		tgs = append(tgs, tg)
 	}
-	if err != nil {
+	if err == nil {
 		// We're returning all Consul services as a single targetgroup.
 		ch <- tgs
 	}
@@ -236,7 +236,7 @@ func main() {
 	// NOTE: create an instance of your new SD implementation here.
 	cfg := sdConfig{
 		TagSeparator:    ",",
-		Address:         "localhost:8500",
+		Address:         *listenAddress,
 		RefreshInterval: 30,
 	}
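These three fixes make the example adapter consistent: the flag now describes the Consul address it actually dials, its default matches Consul's standard port, and target groups are only forwarded when the service listing succeeded (`err == nil`). A hypothetical invocation (the binary name is illustrative):

```sh
./custom-sd-adapter --listen.address=localhost:8500 --output.file=custom_sd.json
```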


@@ -15,48 +15,38 @@
 package textparse

 import (
 	"fmt"
-	"math"
-	"strconv"
-	"unicode/utf8"
-
-	"github.com/prometheus/prometheus/pkg/value"
 )

 const (
-	lstateInit = iota
-	lstateName
-	lstateValue
-	lstateTimestamp
-	lstateLabels
-	lstateLName
-	lstateLEq
-	lstateLValue
-	lstateLValueIn
+	sInit = iota
+	sComment
+	sMeta1
+	sMeta2
+	sLabels
+	sLValue
+	sValue
+	sTimestamp
 )

 // Lex is called by the parser generated by "go tool yacc" to obtain each
 // token. The method is opened before the matching rules block and closed at
 // the end of the file.
-func (l *lexer) Lex() int {
-	l.state = lstateInit
-
+func (l *lexer) Lex() token {
 	if l.i >= len(l.b) {
-		return eof
+		return tEOF
 	}
 	c := l.b[l.i]
-
-	l.ts = nil
-	l.mstart = l.nextMstart
-	l.offsets = l.offsets[:0]
+	l.start = l.i

 %}

 D     [0-9]
 L     [a-zA-Z_]
 M     [a-zA-Z_:]
+C     [^\n]

-%x lstateName lstateValue lstateTimestamp lstateLabels lstateLName lstateLEq lstateLValue lstateLValueIn
+%x sComment sMeta1 sMeta2 sLabels sLValue sValue sTimestamp

 %yyc c
 %yyn c = l.next()

@@ -65,65 +55,46 @@ M [a-zA-Z_:]

 %%

-\0                                    return eof
-#[^\r\n]*\n                           l.mstart = l.i
-[\r\n \t]+                            l.mstart = l.i
+\0                                    return tEOF
+\n                                    l.state = sInit; return tLinebreak
+<*>[ \t]+                             return tWhitespace

-{M}({M}|{D})*                         l.state = lstateName
-                                      l.offsets = append(l.offsets, l.i)
-                                      l.mend = l.i
+#[ \t]+                               l.state = sComment
+#                                     return l.consumeComment()
+<sComment>HELP[\t ]+                  l.state = sMeta1; return tHelp
+<sComment>TYPE[\t ]+                  l.state = sMeta1; return tType
+<sMeta1>{M}({M}|{D})*                 l.state = sMeta2; return tMName
+<sMeta2>{C}+                          l.state = sInit; return tText

-<lstateName>([ \t]*)\{                l.state = lstateLabels
-
-<lstateName>[ \t]+                    l.state = lstateValue
-                                      l.vstart = l.i
-
-<lstateLabels>[ \t]+
-<lstateLabels>,?\}                    l.state = lstateValue
-                                      l.mend = l.i
-<lstateLabels>(,?[ \t]*)              l.state = lstateLName
-                                      l.offsets = append(l.offsets, l.i)
-
-<lstateLName>{L}({L}|{D})*            l.state = lstateLEq
-                                      l.offsets = append(l.offsets, l.i)
-
-<lstateLEq>[ \t]*=                    l.state = lstateLValue
-
-<lstateLValue>[ \t]+
-<lstateLValue>\"                      l.state = lstateLValueIn
-                                      l.offsets = append(l.offsets, l.i)
-
-<lstateLValueIn>(\\.|[^\\"])*\"       l.state = lstateLabels
-                                      if !utf8.Valid(l.b[l.offsets[len(l.offsets)-1]:l.i-1]) {
-                                        l.err = fmt.Errorf("invalid UTF-8 label value")
-                                        return -1
-                                      }
-                                      l.offsets = append(l.offsets, l.i-1)
-
-<lstateValue>[ \t]+                   l.vstart = l.i
-<lstateValue>(NaN)                    l.val = math.Float64frombits(value.NormalNaN)
-                                      l.state = lstateTimestamp
-<lstateValue>[^\n \t\r]+              // We don't parse strictly correct floats as the conversion
-                                      // repeats the effort anyway.
-                                      l.val, l.err = strconv.ParseFloat(yoloString(l.b[l.vstart:l.i]), 64)
-                                      if l.err != nil {
-                                        return -1
-                                      }
-                                      l.state = lstateTimestamp
-<lstateTimestamp>[ \t]+               l.tstart = l.i
-<lstateTimestamp>{D}+                 ts, err := strconv.ParseInt(yoloString(l.b[l.tstart:l.i]), 10, 64)
-                                      if err != nil {
-                                        l.err = err
-                                        return -1
-                                      }
-                                      l.ts = &ts
-<lstateTimestamp>[\r\n]+              l.nextMstart = l.i
-                                      return 1
-<lstateTimestamp>\0                   return 1
+{M}({M}|{D})*                         l.state = sValue; return tMName
+<sValue>\{                            l.state = sLabels; return tBraceOpen
+<sLabels>{L}({L}|{D})*                return tLName
+<sLabels>\}                           l.state = sValue; return tBraceClose
+<sLabels>=                            l.state = sLValue; return tEqual
+<sLabels>,                            return tComma
+<sLValue>\"(\\.|[^\\"])*\"            l.state = sLabels; return tLValue
+<sValue>[^{ \t\n]+                    l.state = sTimestamp; return tValue
+<sTimestamp>{D}+                      return tTimestamp
+<sTimestamp>\n                        l.state = sInit; return tLinebreak

 %%

-	l.err = fmt.Errorf("no token found")
-	return -1
+	// Workaround to gobble up comments that started with a HELP or TYPE
+	// prefix. We just consume all characters until we reach a newline.
+	// This saves us from adding disproportionate complexity to the parser.
+	if l.state == sComment {
+		return l.consumeComment()
+	}
+	return tInvalid
+}
+
+func (l *lexer) consumeComment() token {
+	for c := l.cur(); ; c = l.next() {
+		switch c {
+		case 0:
+			return tEOF
+		case '\n':
+			l.state = sInit
+			return tComment
+		}
+	}
 }
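As a sketch of what the rewritten lexer emits, a series line tokenizes as follows under the rules above (the sample input is invented for illustration):

```
input:  http_requests_total{code="200"} 1027 1395066363000
tokens: tMName tBraceOpen tLName tEqual tLValue tBraceClose
        tWhitespace tValue tWhitespace tTimestamp tLinebreak
```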


@@ -17,39 +17,28 @@ package textparse

 import (
 	"fmt"
-	"math"
-	"strconv"
-	"unicode/utf8"
-
-	"github.com/prometheus/prometheus/pkg/value"
 )

 const (
-	lstateInit = iota
-	lstateName
-	lstateValue
-	lstateTimestamp
-	lstateLabels
-	lstateLName
-	lstateLEq
-	lstateLValue
-	lstateLValueIn
+	sInit = iota
+	sComment
+	sMeta1
+	sMeta2
+	sLabels
+	sLValue
+	sValue
+	sTimestamp
 )

 // Lex is called by the parser generated by "go tool yacc" to obtain each
 // token. The method is opened before the matching rules block and closed at
 // the end of the file.
-func (l *lexer) Lex() int {
-	l.state = lstateInit
-
+func (l *lexer) Lex() token {
 	if l.i >= len(l.b) {
-		return eof
+		return tEOF
 	}
 	c := l.b[l.i]
-
-	l.ts = nil
-	l.mstart = l.nextMstart
-	l.offsets = l.offsets[:0]
+	l.start = l.i

 yystate0:
@@ -58,22 +47,20 @@ yystate0:
 	panic(fmt.Errorf(`invalid start condition %d`, yyt))
 case 0: // start condition: INITIAL
 	goto yystart1
-case 1: // start condition: lstateName
-	goto yystart7
-case 2: // start condition: lstateValue
-	goto yystart10
-case 3: // start condition: lstateTimestamp
-	goto yystart16
-case 4: // start condition: lstateLabels
-	goto yystart21
-case 5: // start condition: lstateLName
-	goto yystart26
-case 6: // start condition: lstateLEq
-	goto yystart28
-case 7: // start condition: lstateLValue
-	goto yystart31
-case 8: // start condition: lstateLValueIn
-	goto yystart34
+case 1: // start condition: sComment
+	goto yystart8
+case 2: // start condition: sMeta1
+	goto yystart19
+case 3: // start condition: sMeta2
+	goto yystart21
+case 4: // start condition: sLabels
+	goto yystart24
+case 5: // start condition: sLValue
+	goto yystart29
+case 6: // start condition: sValue
+	goto yystart33
+case 7: // start condition: sTimestamp
+	goto yystart36
 }

 goto yystate0 // silence unused label error
@@ -85,10 +72,12 @@ yystart1:
 	switch {
 	default:
 		goto yyabort
 	case c == '#':
-		goto yystate4
+		goto yystate5
 	case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
-		goto yystate6
-	case c == '\t' || c == '\n' || c == '\r' || c == ' ':
+		goto yystate7
+	case c == '\n':
+		goto yystate4
+	case c == '\t' || c == ' ':
 		goto yystate3
 	case c == '\x00':
 		goto yystate2
@@ -103,74 +92,71 @@ yystate3:
 	switch {
 	default:
 		goto yyrule3
-	case c == '\t' || c == '\n' || c == '\r' || c == ' ':
+	case c == '\t' || c == ' ':
 		goto yystate3
 	}

 yystate4:
 	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == '\n':
-		goto yystate5
-	case c >= '\x01' && c <= '\t' || c == '\v' || c == '\f' || c >= '\x0e' && c <= 'ÿ':
-		goto yystate4
-	}
+	goto yyrule2

 yystate5:
 	c = l.next()
-	goto yyrule2
+	switch {
+	default:
+		goto yyrule5
+	case c == '\t' || c == ' ':
+		goto yystate6
+	}

 yystate6:
 	c = l.next()
 	switch {
 	default:
 		goto yyrule4
-	case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
+	case c == '\t' || c == ' ':
 		goto yystate6
 	}

-	goto yystate7 // silence unused label error
 yystate7:
 	c = l.next()
-yystart7:
 	switch {
 	default:
-		goto yyabort
-	case c == '\t' || c == ' ':
-		goto yystate8
-	case c == '{':
-		goto yystate9
+		goto yyrule10
+	case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
+		goto yystate7
 	}

+	goto yystate8 // silence unused label error
 yystate8:
 	c = l.next()
+yystart8:
 	switch {
 	default:
-		goto yyrule6
+		goto yyabort
+	case c == 'H':
+		goto yystate9
+	case c == 'T':
+		goto yystate14
 	case c == '\t' || c == ' ':
-		goto yystate8
-	case c == '{':
-		goto yystate9
+		goto yystate3
 	}

 yystate9:
 	c = l.next()
-	goto yyrule5
-
-	goto yystate10 // silence unused label error
-yystate10:
-	c = l.next()
-yystart10:
 	switch {
 	default:
 		goto yyabort
-	case c == 'N':
-		goto yystate13
-	case c == '\t' || c == ' ':
-		goto yystate12
-	case c >= '\x01' && c <= '\b' || c == '\v' || c == '\f' || c >= '\x0e' && c <= '\x1f' || c >= '!' && c <= 'M' || c >= 'O' && c <= 'ÿ':
-		goto yystate11
+	case c == 'E':
+		goto yystate10
 	}
+
+yystate10:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == 'L':
+		goto yystate11
+	}
@@ -178,96 +164,93 @@ yystate11:
 	c = l.next()
 	switch {
 	default:
-		goto yyrule17
-	case c >= '\x01' && c <= '\b' || c == '\v' || c == '\f' || c >= '\x0e' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
-		goto yystate11
+		goto yyabort
+	case c == 'P':
+		goto yystate12
 	}

 yystate12:
 	c = l.next()
 	switch {
 	default:
-		goto yyrule15
+		goto yyabort
 	case c == '\t' || c == ' ':
-		goto yystate12
+		goto yystate13
 	}

 yystate13:
 	c = l.next()
 	switch {
 	default:
-		goto yyrule17
-	case c == 'a':
-		goto yystate14
-	case c >= '\x01' && c <= '\b' || c == '\v' || c == '\f' || c >= '\x0e' && c <= '\x1f' || c >= '!' && c <= '`' || c >= 'b' && c <= 'ÿ':
-		goto yystate11
+		goto yyrule6
+	case c == '\t' || c == ' ':
+		goto yystate13
 	}

 yystate14:
 	c = l.next()
 	switch {
 	default:
-		goto yyrule17
-	case c == 'N':
+		goto yyabort
+	case c == 'Y':
 		goto yystate15
-	case c >= '\x01' && c <= '\b' || c == '\v' || c == '\f' || c >= '\x0e' && c <= '\x1f' || c >= '!' && c <= 'M' || c >= 'O' && c <= 'ÿ':
-		goto yystate11
 	}

 yystate15:
 	c = l.next()
 	switch {
 	default:
-		goto yyrule16
-	case c >= '\x01' && c <= '\b' || c == '\v' || c == '\f' || c >= '\x0e' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
-		goto yystate11
+		goto yyabort
+	case c == 'P':
+		goto yystate16
 	}

-	goto yystate16 // silence unused label error
 yystate16:
 	c = l.next()
-yystart16:
 	switch {
 	default:
 		goto yyabort
-	case c == '\n' || c == '\r':
-		goto yystate19
-	case c == '\t' || c == ' ':
-		goto yystate18
-	case c == '\x00':
+	case c == 'E':
 		goto yystate17
-	case c >= '0' && c <= '9':
-		goto yystate20
 	}

 yystate17:
 	c = l.next()
-	goto yyrule21
+	switch {
+	default:
+		goto yyabort
+	case c == '\t' || c == ' ':
+		goto yystate18
+	}

 yystate18:
 	c = l.next()
 	switch {
 	default:
-		goto yyrule18
+		goto yyrule7
 	case c == '\t' || c == ' ':
 		goto yystate18
 	}

+	goto yystate19 // silence unused label error
 yystate19:
 	c = l.next()
+yystart19:
 	switch {
 	default:
-		goto yyrule20
-	case c == '\n' || c == '\r':
-		goto yystate19
+		goto yyabort
+	case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
+		goto yystate20
+	case c == '\t' || c == ' ':
+		goto yystate3
 	}

 yystate20:
 	c = l.next()
 	switch {
 	default:
-		goto yyrule19
-	case c >= '0' && c <= '9':
+		goto yyrule8
+	case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
 		goto yystate20
 	}
@@ -277,21 +260,19 @@ yystate21:
 yystart21:
 	switch {
 	default:
-		goto yyrule9
-	case c == ',':
-		goto yystate23
+		goto yyabort
 	case c == '\t' || c == ' ':
-		goto yystate22
-	case c == '}':
-		goto yystate25
+		goto yystate23
+	case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
+		goto yystate22
 	}

 yystate22:
 	c = l.next()
 	switch {
 	default:
-		goto yyrule7
-	case c == '\t' || c == ' ':
+		goto yyrule9
+	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
 		goto yystate22
 	}
@@ -299,269 +280,271 @@ yystate23:
 	c = l.next()
 	switch {
 	default:
-		goto yyrule9
+		goto yyrule3
 	case c == '\t' || c == ' ':
-		goto yystate24
-	case c == '}':
-		goto yystate25
+		goto yystate23
+	case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
+		goto yystate22
 	}

+	goto yystate24 // silence unused label error
 yystate24:
 	c = l.next()
+yystart24:
 	switch {
 	default:
-		goto yyrule9
+		goto yyabort
+	case c == ',':
+		goto yystate25
+	case c == '=':
+		goto yystate26
 	case c == '\t' || c == ' ':
-		goto yystate24
+		goto yystate3
 	case c == '}':
-		goto yystate25
+		goto yystate28
+	case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
+		goto yystate27
 	}

 yystate25:
 	c = l.next()
-	goto yyrule8
+	goto yyrule15

-	goto yystate26 // silence unused label error
 yystate26:
 	c = l.next()
-yystart26:
-	switch {
-	default:
-		goto yyabort
-	case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
-		goto yystate27
-	}
+	goto yyrule14

 yystate27:
 	c = l.next()
 	switch {
 	default:
-		goto yyrule10
+		goto yyrule12
 	case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
 		goto yystate27
 	}

-	goto yystate28 // silence unused label error
 yystate28:
 	c = l.next()
-yystart28:
-	switch {
-	default:
-		goto yyabort
-	case c == '=':
-		goto yystate30
-	case c == '\t' || c == ' ':
-		goto yystate29
-	}
+	goto yyrule13

+	goto yystate29 // silence unused label error
 yystate29:
 	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == '=':
-		goto yystate30
-	case c == '\t' || c == ' ':
-		goto yystate29
-	}
-
-yystate30:
-	c = l.next()
-	goto yyrule11
-
-	goto yystate31 // silence unused label error
-yystate31:
-	c = l.next()
-yystart31:
+yystart29:
 	switch {
 	default:
 		goto yyabort
 	case c == '"':
-		goto yystate33
+		goto yystate30
 	case c == '\t' || c == ' ':
-		goto yystate32
+		goto yystate3
 	}

+yystate30:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == '"':
+		goto yystate31
+	case c == '\\':
+		goto yystate32
+	case c >= '\x01' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ':
+		goto yystate30
+	}
+
+yystate31:
+	c = l.next()
+	goto yyrule16
+
 yystate32:
 	c = l.next()
 	switch {
 	default:
-		goto yyrule12
-	case c == '\t' || c == ' ':
-		goto yystate32
+		goto yyabort
+	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
+		goto yystate30
 	}

+	goto yystate33 // silence unused label error
 yystate33:
 	c = l.next()
-	goto yyrule13
-
-	goto yystate34 // silence unused label error
-yystate34:
-	c = l.next()
-yystart34:
+yystart33:
 	switch {
 	default:
 		goto yyabort
-	case c == '"':
-		goto yystate36
-	case c == '\\':
-		goto yystate37
-	case c >= '\x01' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ':
+	case c == '\t' || c == ' ':
+		goto yystate3
+	case c == '{':
 		goto yystate35
+	case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'z' || c >= '|' && c <= 'ÿ':
+		goto yystate34
 	}

+yystate34:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule17
+	case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'z' || c >= '|' && c <= 'ÿ':
+		goto yystate34
+	}
+
 yystate35:
 	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == '"':
-		goto yystate36
-	case c == '\\':
-		goto yystate37
-	case c >= '\x01' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ':
-		goto yystate35
-	}
+	goto yyrule11

+	goto yystate36 // silence unused label error
 yystate36:
 	c = l.next()
-	goto yyrule14
+yystart36:
+	switch {
+	default:
+		goto yyabort
+	case c == '\n':
+		goto yystate37
+	case c == '\t' || c == ' ':
+		goto yystate3
+	case c >= '0' && c <= '9':
+		goto yystate38
+	}

 yystate37:
 	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
-		goto yystate35
-	}
+	goto yyrule19

+yystate38:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule18
+	case c >= '0' && c <= '9':
+		goto yystate38
+	}
 yyrule1: // \0
 	{
-		return eof
+		return tEOF
 	}
-yyrule2: // #[^\r\n]*\n
+yyrule2: // \n
 	{
-		l.mstart = l.i
+		l.state = sInit
+		return tLinebreak
 		goto yystate0
 	}
-yyrule3: // [\r\n \t]+
+yyrule3: // [ \t]+
 	{
-		l.mstart = l.i
+		return tWhitespace
+	}
+yyrule4: // #[ \t]+
+	{
+		l.state = sComment
 		goto yystate0
 	}
-yyrule4: // {M}({M}|{D})*
+yyrule5: // #
 	{
-		l.state = lstateName
-		l.offsets = append(l.offsets, l.i)
-		l.mend = l.i
+		return l.consumeComment()
+	}
+yyrule6: // HELP[\t ]+
+	{
+		l.state = sMeta1
+		return tHelp
 		goto yystate0
 	}
-yyrule5: // ([ \t]*)\{
+yyrule7: // TYPE[\t ]+
 	{
-		l.state = lstateLabels
+		l.state = sMeta1
+		return tType
 		goto yystate0
 	}
-yyrule6: // [ \t]+
+yyrule8: // {M}({M}|{D})*
 	{
-		l.state = lstateValue
-		l.vstart = l.i
+		l.state = sMeta2
+		return tMName
 		goto yystate0
 	}
-yyrule7: // [ \t]+
-	goto yystate0
-yyrule8: // ,?\}
+yyrule9: // {C}+
 	{
-		l.state = lstateValue
-		l.mend = l.i
+		l.state = sInit
+		return tText
 		goto yystate0
 	}
-yyrule9: // (,?[ \t]*)
+yyrule10: // {M}({M}|{D})*
 	{
-		l.state = lstateLName
-		l.offsets = append(l.offsets, l.i)
+		l.state = sValue
+		return tMName
 		goto yystate0
 	}
-yyrule10: // {L}({L}|{D})*
+yyrule11: // \{
 	{
-		l.state = lstateLEq
-		l.offsets = append(l.offsets, l.i)
+		l.state = sLabels
+		return tBraceOpen
 		goto yystate0
 	}
-yyrule11: // [ \t]*=
+yyrule12: // {L}({L}|{D})*
 	{
-		l.state = lstateLValue
+		return tLName
+	}
+yyrule13: // \}
+	{
+		l.state = sValue
+		return tBraceClose
 		goto yystate0
 	}
-yyrule12: // [ \t]+
-	goto yystate0
-yyrule13: // \"
+yyrule14: // =
 	{
-		l.state = lstateLValueIn
-		l.offsets = append(l.offsets, l.i)
+		l.state = sLValue
+		return tEqual
 		goto yystate0
 	}
-yyrule14: // (\\.|[^\\"])*\"
+yyrule15: // ,
 	{
-		l.state = lstateLabels
-		if !utf8.Valid(l.b[l.offsets[len(l.offsets)-1] : l.i-1]) {
-			l.err = fmt.Errorf("invalid UTF-8 label value")
-			return -1
-		}
-		l.offsets = append(l.offsets, l.i-1)
+		return tComma
+	}
+yyrule16: // \"(\\.|[^\\"])*\"
+	{
+		l.state = sLabels
+		return tLValue
 		goto yystate0
 	}
-yyrule15: // [ \t]+
+yyrule17: // [^{ \t\n]+
 	{
-		l.vstart = l.i
+		l.state = sTimestamp
+		return tValue
 		goto yystate0
 	}
-yyrule16: // (NaN)
+yyrule18: // {D}+
 	{
-		l.val = math.Float64frombits(value.NormalNaN)
-		l.state = lstateTimestamp
+		return tTimestamp
+	}
+yyrule19: // \n
+	{
+		l.state = sInit
+		return tLinebreak
 		goto yystate0
 	}
-yyrule17: // [^\n \t\r]+
-	{
-		// We don't parse strictly correct floats as the conversion
-		// repeats the effort anyway.
-		l.val, l.err = strconv.ParseFloat(yoloString(l.b[l.vstart:l.i]), 64)
-		if l.err != nil {
-			return -1
-		}
-		l.state = lstateTimestamp
-		goto yystate0
-	}
-yyrule18: // [ \t]+
-	{
-		l.tstart = l.i
-		goto yystate0
-	}
-yyrule19: // {D}+
-	{
-		ts, err := strconv.ParseInt(yoloString(l.b[l.tstart:l.i]), 10, 64)
-		if err != nil {
-			l.err = err
-			return -1
-		}
-		l.ts = &ts
-		goto yystate0
-	}
-yyrule20: // [\r\n]+
-	{
-		l.nextMstart = l.i
-		return 1
-	}
-yyrule21: // \0
-	{
-		return 1
-	}
 	panic("unreachable")
 	goto yyabort // silence unused label error

 yyabort: // no lexem recognized
-	l.err = fmt.Errorf("no token found")
-	return -1
+	// Workaround to gobble up comments that started with a HELP or TYPE
+	// prefix. We just consume all characters until we reach a newline.
+	// This saves us from adding disproportionate complexity to the parser.
+	if l.state == sComment {
+		return l.consumeComment()
+	}
+	return tInvalid
+}
+
+func (l *lexer) consumeComment() token {
+	for c := l.cur(); ; c = l.next() {
+		switch c {
+		case 0:
+			return tEOF
+		case '\n':
+			l.state = sInit
+			return tComment
+		}
+	}
 }


@ -19,45 +19,114 @@ package textparse
import ( import (
"errors" "errors"
"fmt"
"io" "io"
"math"
"sort" "sort"
"strconv"
"strings" "strings"
"unicode/utf8"
"unsafe" "unsafe"
"github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/value"
) )
type lexer struct { type lexer struct {
b []byte b []byte
i int i int
vstart int start int
tstart int err error
err error
val float64
ts *int64
offsets []int
mstart, mend int
nextMstart int
state int state int
} }
const eof = 0 type token int
const (
tInvalid token = -1
tEOF token = 0
tLinebreak token = iota
tWhitespace
tHelp
tType
tText
tComment
tBlank
tMName
tBraceOpen
tBraceClose
tLName
tLValue
tComma
tEqual
tTimestamp
tValue
)
func (t token) String() string {
switch t {
case tInvalid:
return "INVALID"
case tEOF:
return "EOF"
case tLinebreak:
return "LINEBREAK"
case tWhitespace:
return "WHITESPACE"
case tHelp:
return "HELP"
case tType:
return "TYPE"
case tText:
return "TEXT"
case tComment:
return "COMMENT"
case tBlank:
return "BLANK"
case tMName:
return "MNAME"
case tBraceOpen:
return "BOPEN"
case tBraceClose:
return "BCLOSE"
case tLName:
return "LNAME"
case tLValue:
return "LVALUE"
case tEqual:
return "EQUAL"
case tComma:
return "COMMA"
case tTimestamp:
return "TIMESTAMP"
case tValue:
return "VALUE"
}
return fmt.Sprintf("<invalid: %d>", t)
}
// buf returns the buffer of the current token.
func (l *lexer) buf() []byte {
return l.b[l.start:l.i]
}
func (l *lexer) cur() byte {
return l.b[l.i]
}
// next advances the lexer to the next character.
func (l *lexer) next() byte { func (l *lexer) next() byte {
l.i++ l.i++
if l.i >= len(l.b) { if l.i >= len(l.b) {
l.err = io.EOF l.err = io.EOF
return eof return byte(tEOF)
} }
c := l.b[l.i] // Lex struggles with null bytes. If we are in a label value or help string, where
// they are allowed, consume them here immediately.
// Consume null byte when encountered in label-value. for l.b[l.i] == 0 && (l.state == sLValue || l.state == sMeta2 || l.state == sComment) {
if c == eof && (l.state == lstateLValueIn || l.state == lstateLValue) { l.i++
return l.next()
} }
return c return l.b[l.i]
} }
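A short in-package sketch of what this buys us: a null byte embedded in a label value no longer derails the state machine, and it is preserved in the output, since only the DFA skips it, not the underlying buffer.

```go
// The \x00 is invisible to the lexer's state machine but remains part of
// the token bytes, so it survives into the parsed series.
p := New([]byte("null_byte_metric{a=\"abc\x00\"} 1\n"))
if et, err := p.Next(); err == nil && et == EntrySeries {
	m, _, v := p.Series() // m is the full series bytes incl. \x00, v == 1
	_, _ = m, v
}
```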
func (l *lexer) Error(es string) { func (l *lexer) Error(es string) {
@ -67,43 +136,56 @@ func (l *lexer) Error(es string) {
// Parser parses samples from a byte slice of samples in the official // Parser parses samples from a byte slice of samples in the official
// Prometheus text exposition format. // Prometheus text exposition format.
type Parser struct { type Parser struct {
l *lexer l *lexer
err error series []byte
val float64 text []byte
mtype MetricType
val float64
ts int64
hasTS bool
start int
offsets []int
} }
// New returns a new parser of the byte slice. // New returns a new parser of the byte slice.
func New(b []byte) *Parser { func New(b []byte) *Parser {
return &Parser{l: &lexer{b: b}} return &Parser{l: &lexer{b: append(b, '\n')}}
} }
// Next advances the parser to the next sample. It returns false if no // Series returns the bytes of the series, the timestamp if set, and the value
// more samples were read or an error occurred.
func (p *Parser) Next() bool {
switch p.l.Lex() {
case -1, eof:
return false
case 1:
return true
}
panic("unexpected")
}
// At returns the bytes of the metric, the timestamp if set, and the value
// of the current sample. // of the current sample.
func (p *Parser) At() ([]byte, *int64, float64) { func (p *Parser) Series() ([]byte, *int64, float64) {
return p.l.b[p.l.mstart:p.l.mend], p.l.ts, p.l.val if p.hasTS {
return p.series, &p.ts, p.val
}
return p.series, nil, p.val
} }
// Err returns the current error. // Help returns the metric name and help text in the current entry.
func (p *Parser) Err() error { // Must only be called after Next returned a help entry.
if p.err != nil { // The returned byte slices become invalid after the next call to Next.
return p.err func (p *Parser) Help() ([]byte, []byte) {
m := p.l.b[p.offsets[0]:p.offsets[1]]
// Replacer causes allocations. Replace only when necessary.
if strings.IndexByte(yoloString(p.text), byte('\\')) >= 0 {
return m, []byte(helpReplacer.Replace(string(p.text)))
} }
if p.l.err == io.EOF { return m, p.text
return nil }
}
return p.l.err // Type returns the metric name and type in the current entry.
// Must only be called after Next returned a type entry.
// The returned byte slices become invalid after the next call to Next.
func (p *Parser) Type() ([]byte, MetricType) {
return p.l.b[p.offsets[0]:p.offsets[1]], p.mtype
}
// Comment returns the text of the current comment.
// Must only be called after Next returned a comment entry.
// The returned byte slice becomes invalid after the next call to Next.
func (p *Parser) Comment() []byte {
return p.text
} }
// Metric writes the labels of the current sample into the passed labels. // Metric writes the labels of the current sample into the passed labels.
@ -111,39 +193,222 @@ func (p *Parser) Err() error {
func (p *Parser) Metric(l *labels.Labels) string { func (p *Parser) Metric(l *labels.Labels) string {
// Allocate the full immutable string immediately, so we just // Allocate the full immutable string immediately, so we just
// have to create references on it below. // have to create references on it below.
s := string(p.l.b[p.l.mstart:p.l.mend]) s := string(p.series)
*l = append(*l, labels.Label{ *l = append(*l, labels.Label{
Name: labels.MetricName, Name: labels.MetricName,
Value: s[:p.l.offsets[0]-p.l.mstart], Value: s[:p.offsets[0]-p.start],
}) })
for i := 1; i < len(p.l.offsets); i += 4 { for i := 1; i < len(p.offsets); i += 4 {
a := p.l.offsets[i] - p.l.mstart a := p.offsets[i] - p.start
b := p.l.offsets[i+1] - p.l.mstart b := p.offsets[i+1] - p.start
c := p.l.offsets[i+2] - p.l.mstart c := p.offsets[i+2] - p.start
d := p.l.offsets[i+3] - p.l.mstart d := p.offsets[i+3] - p.start
// Replacer causes allocations. Replace only when necessary. // Replacer causes allocations. Replace only when necessary.
if strings.IndexByte(s[c:d], byte('\\')) >= 0 { if strings.IndexByte(s[c:d], byte('\\')) >= 0 {
*l = append(*l, labels.Label{Name: s[a:b], Value: replacer.Replace(s[c:d])}) *l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])})
continue continue
} }
*l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]}) *l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]})
} }
// Sort labels. We can skip the first entry since the metric name is
// already at the right place.
sort.Sort((*l)[1:]) sort.Sort((*l)[1:])
return s return s
} }
var replacer = strings.NewReplacer( // nextToken returns the next token from the lexer. It skips over tabs
`\"`, `"`, // and spaces.
`\\`, `\`, func (p *Parser) nextToken() token {
`\n`, ` for {
`, if tok := p.l.Lex(); tok != tWhitespace {
`\t`, ` `, return tok
}
}
}
// Entry represents the type of a parsed entry.
type Entry int
const (
EntryInvalid Entry = -1
EntryType Entry = 0
EntryHelp Entry = 1
EntrySeries Entry = 2
EntryComment Entry = 3
)
// MetricType represents metric type values.
type MetricType string
const (
MetricTypeCounter = "counter"
MetricTypeGauge = "gauge"
MetricTypeHistogram = "histogram"
MetricTypeSummary = "summary"
MetricTypeUntyped = "untyped"
)
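Tying the new token, entry, and accessor pieces together, here is a hedged sketch of the intended consumption loop (standalone program; the import path matches the one used by the scrape package later in this diff):

```go
package main

import (
	"fmt"
	"io"

	"github.com/prometheus/prometheus/pkg/textparse"
)

func main() {
	input := "# HELP go_goroutines Number of goroutines that currently exist.\n" +
		"# TYPE go_goroutines gauge\n" +
		"go_goroutines 33 123123\n"

	p := textparse.New([]byte(input))
	for {
		et, err := p.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			panic(err) // a real caller would surface the parse error
		}
		switch et {
		case textparse.EntryHelp:
			m, h := p.Help()
			fmt.Printf("help %s: %s\n", m, h)
		case textparse.EntryType:
			m, typ := p.Type()
			fmt.Printf("type %s: %s\n", m, typ)
		case textparse.EntrySeries:
			m, ts, v := p.Series()
			if ts != nil {
				fmt.Printf("series %s v=%v ts=%d\n", m, v, *ts)
			} else {
				fmt.Printf("series %s v=%v\n", m, v)
			}
		case textparse.EntryComment:
			fmt.Printf("comment %s\n", p.Comment())
		}
	}
}
```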
func parseError(exp string, got token) error {
return fmt.Errorf("%s, got %q", exp, got)
}
// Next advances the parser to the next sample. It returns false if no
// more samples were read or an error occurred.
func (p *Parser) Next() (Entry, error) {
var err error
p.start = p.l.i
p.offsets = p.offsets[:0]
switch t := p.nextToken(); t {
case tEOF:
return EntryInvalid, io.EOF
case tLinebreak:
// Allow full blank lines.
return p.Next()
case tHelp, tType:
switch t := p.nextToken(); t {
case tMName:
p.offsets = append(p.offsets, p.l.start, p.l.i)
default:
return EntryInvalid, parseError("expected metric name after HELP", t)
}
switch t := p.nextToken(); t {
case tText:
p.text = p.l.buf()[1:]
default:
return EntryInvalid, parseError("expected text in HELP", t)
}
switch t {
case tType:
switch s := yoloString(p.text); s {
case "counter":
p.mtype = MetricTypeCounter
case "gauge":
p.mtype = MetricTypeGauge
case "histogram":
p.mtype = MetricTypeHistogram
case "summary":
p.mtype = MetricTypeSummary
case "untyped":
p.mtype = MetricTypeUntyped
default:
return EntryInvalid, fmt.Errorf("invalid metric type %q", s)
}
case tHelp:
if !utf8.Valid(p.text) {
return EntryInvalid, fmt.Errorf("help text is not a valid utf8 string")
}
}
if t := p.nextToken(); t != tLinebreak {
return EntryInvalid, parseError("linebreak expected after metadata", t)
}
switch t {
case tHelp:
return EntryHelp, nil
case tType:
return EntryType, nil
}
case tComment:
p.text = p.l.buf()
if t := p.nextToken(); t != tLinebreak {
return EntryInvalid, parseError("linebreak expected after comment", t)
}
return EntryComment, nil
case tMName:
p.offsets = append(p.offsets, p.l.i)
p.series = p.l.b[p.start:p.l.i]
t2 := p.nextToken()
if t2 == tBraceOpen {
if err := p.parseLVals(); err != nil {
return EntryInvalid, err
}
p.series = p.l.b[p.start:p.l.i]
t2 = p.nextToken()
}
if t2 != tValue {
return EntryInvalid, parseError("expected value after metric", t)
}
if p.val, err = strconv.ParseFloat(yoloString(p.l.buf()), 64); err != nil {
return EntryInvalid, err
}
// Ensure canonical NaN value.
if math.IsNaN(p.val) {
p.val = math.Float64frombits(value.NormalNaN)
}
p.hasTS = false
switch p.nextToken() {
case tLinebreak:
break
case tTimestamp:
p.hasTS = true
if p.ts, err = strconv.ParseInt(yoloString(p.l.buf()), 10, 64); err != nil {
return EntryInvalid, err
}
if t2 := p.nextToken(); t2 != tLinebreak {
return EntryInvalid, parseError("expected next entry after timestamp", t)
}
default:
return EntryInvalid, parseError("expected timestamp or new record", t)
}
return EntrySeries, nil
default:
err = fmt.Errorf("%q is not a valid start token", t)
}
return EntryInvalid, err
}
func (p *Parser) parseLVals() error {
t := p.nextToken()
for {
switch t {
case tBraceClose:
return nil
case tLName:
default:
return parseError("expected label name", t)
}
p.offsets = append(p.offsets, p.l.start, p.l.i)
if t := p.nextToken(); t != tEqual {
return parseError("expected equal", t)
}
if t := p.nextToken(); t != tLValue {
return parseError("expected label value", t)
}
if !utf8.Valid(p.l.buf()) {
return fmt.Errorf("invalid UTF-8 label value")
}
// The lexer ensures the value string is quoted. Strip first
// and last character.
p.offsets = append(p.offsets, p.l.start+1, p.l.i-1)
// Free trailing commas are allowed.
if t = p.nextToken(); t == tComma {
t = p.nextToken()
}
}
}
var lvalReplacer = strings.NewReplacer(
`\"`, "\"",
`\\`, "\\",
`\n`, "\n",
)
var helpReplacer = strings.NewReplacer(
`\\`, "\\",
`\n`, "\n",
) )
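As a quick standalone illustration of what the two replacers undo (label values may escape quotes, backslashes, and newlines; help text only the latter two):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Same rules as lvalReplacer above.
	lval := strings.NewReplacer(`\"`, "\"", `\\`, "\\", `\n`, "\n")
	fmt.Printf("%q\n", lval.Replace(`say \"hi\"\nback\\slash`))
	// Output: "say \"hi\"\nback\\slash"
}
```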
func yoloString(b []byte) string { func yoloString(b []byte) string {
@ -29,15 +29,19 @@ import (
func TestParse(t *testing.T) { func TestParse(t *testing.T) {
input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations. input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary # TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 4.9351e-05 go_gc_duration_seconds{quantile="0"} 4.9351e-05
go_gc_duration_seconds{quantile="0.25",} 7.424100000000001e-05 go_gc_duration_seconds{quantile="0.25",} 7.424100000000001e-05
go_gc_duration_seconds{quantile="0.5",a="b"} 8.3835e-05 go_gc_duration_seconds{quantile="0.5",a="b"} 8.3835e-05
go_gc_duration_seconds{quantile="0.8", a="b"} 8.3835e-05 go_gc_duration_seconds{quantile="0.8", a="b"} 8.3835e-05
go_gc_duration_seconds{ quantile="0.9", a="b"} 8.3835e-05 go_gc_duration_seconds{ quantile="0.9", a="b"} 8.3835e-05
# Hrandom comment starting with prefix of HELP
#
# comment with escaped \n newline
# comment with escaped \ escape character
go_gc_duration_seconds{ quantile="1.0", a="b" } 8.3835e-05 go_gc_duration_seconds{ quantile="1.0", a="b" } 8.3835e-05
go_gc_duration_seconds { quantile="1.0", a="b" } 8.3835e-05 go_gc_duration_seconds { quantile="1.0", a="b" } 8.3835e-05
go_gc_duration_seconds { quantile= "1.0", a= "b" } 8.3835e-05 go_gc_duration_seconds { quantile= "1.0", a= "b", } 8.3835e-05
go_gc_duration_seconds { quantile = "1.0", a = "b" } 8.3835e-05 go_gc_duration_seconds { quantile = "1.0", a = "b" } 8.3835e-05
go_gc_duration_seconds_count 99 go_gc_duration_seconds_count 99
some:aggregate:rate5m{a_b="c"} 1 some:aggregate:rate5m{a_b="c"} 1
@ -47,17 +51,27 @@ go_goroutines 33 123123
_metric_starting_with_underscore 1 _metric_starting_with_underscore 1
testmetric{_label_starting_with_underscore="foo"} 1 testmetric{_label_starting_with_underscore="foo"} 1
testmetric{label="\"bar\""} 1` testmetric{label="\"bar\""} 1`
input += "\n# HELP metric foo\x00bar"
input += "\nnull_byte_metric{a=\"abc\x00\"} 1" input += "\nnull_byte_metric{a=\"abc\x00\"} 1"
int64p := func(x int64) *int64 { return &x } int64p := func(x int64) *int64 { return &x }
exp := []struct { exp := []struct {
lset labels.Labels lset labels.Labels
m string m string
t *int64 t *int64
v float64 v float64
typ MetricType
help string
comment string
}{ }{
{ {
m: "go_gc_duration_seconds",
help: "A summary of the GC invocation durations.",
}, {
m: "go_gc_duration_seconds",
typ: MetricTypeSummary,
}, {
m: `go_gc_duration_seconds{quantile="0"}`, m: `go_gc_duration_seconds{quantile="0"}`,
v: 4.9351e-05, v: 4.9351e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0"), lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0"),
@ -77,6 +91,14 @@ testmetric{label="\"bar\""} 1`
m: `go_gc_duration_seconds{ quantile="0.9", a="b"}`, m: `go_gc_duration_seconds{ quantile="0.9", a="b"}`,
v: 8.3835e-05, v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.9", "a", "b"), lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.9", "a", "b"),
}, {
comment: "# Hrandom comment starting with prefix of HELP",
}, {
comment: "#",
}, {
comment: "# comment with escaped \\n newline",
}, {
comment: "# comment with escaped \\ escape character",
}, { }, {
m: `go_gc_duration_seconds{ quantile="1.0", a="b" }`, m: `go_gc_duration_seconds{ quantile="1.0", a="b" }`,
v: 8.3835e-05, v: 8.3835e-05,
@ -86,7 +108,7 @@ testmetric{label="\"bar\""} 1`
v: 8.3835e-05, v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
}, { }, {
m: `go_gc_duration_seconds { quantile= "1.0", a= "b" }`, m: `go_gc_duration_seconds { quantile= "1.0", a= "b", }`,
v: 8.3835e-05, v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"), lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
}, { }, {
@ -101,6 +123,12 @@ testmetric{label="\"bar\""} 1`
m: `some:aggregate:rate5m{a_b="c"}`, m: `some:aggregate:rate5m{a_b="c"}`,
v: 1, v: 1,
lset: labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"), lset: labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"),
}, {
m: "go_goroutines",
help: "Number of goroutines that currently exist.",
}, {
m: "go_goroutines",
typ: MetricTypeGauge,
}, { }, {
m: `go_goroutines`, m: `go_goroutines`,
v: 33, v: 33,
@ -118,6 +146,9 @@ testmetric{label="\"bar\""} 1`
m: "testmetric{label=\"\\\"bar\\\"\"}", m: "testmetric{label=\"\\\"bar\\\"\"}",
v: 1, v: 1,
lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`), lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`),
}, {
m: "metric",
help: "foo\x00bar",
}, { }, {
m: "null_byte_metric{a=\"abc\x00\"}", m: "null_byte_metric{a=\"abc\x00\"}",
v: 1, v: 1,
@ -130,23 +161,42 @@ testmetric{label="\"bar\""} 1`
var res labels.Labels var res labels.Labels
for p.Next() { for {
m, ts, v := p.At() et, err := p.Next()
if err == io.EOF {
break
}
require.NoError(t, err)
p.Metric(&res) switch et {
case EntrySeries:
m, ts, v := p.Series()
require.Equal(t, exp[i].m, string(m)) p.Metric(&res)
require.Equal(t, exp[i].t, ts)
require.Equal(t, exp[i].v, v) require.Equal(t, exp[i].m, string(m))
require.Equal(t, exp[i].lset, res) require.Equal(t, exp[i].t, ts)
require.Equal(t, exp[i].v, v)
require.Equal(t, exp[i].lset, res)
res = res[:0]
case EntryType:
m, typ := p.Type()
require.Equal(t, exp[i].m, string(m))
require.Equal(t, exp[i].typ, typ)
case EntryHelp:
m, h := p.Help()
require.Equal(t, exp[i].m, string(m))
require.Equal(t, exp[i].help, string(h))
case EntryComment:
require.Equal(t, exp[i].comment, string(p.Comment()))
}
i++ i++
res = res[:0]
} }
require.NoError(t, p.Err())
require.Equal(t, len(exp), i) require.Equal(t, len(exp), i)
} }
func TestParseErrors(t *testing.T) { func TestParseErrors(t *testing.T) {
@ -156,19 +206,19 @@ func TestParseErrors(t *testing.T) {
}{ }{
{ {
input: "a", input: "a",
err: "no token found", err: "expected value after metric, got \"MNAME\"",
}, },
{ {
input: "a{b='c'} 1\n", input: "a{b='c'} 1\n",
err: "no token found", err: "expected label value, got \"INVALID\"",
}, },
{ {
input: "a{b=\n", input: "a{b=\n",
err: "no token found", err: "expected label value, got \"INVALID\"",
}, },
{ {
input: "a{\xff=\"foo\"} 1\n", input: "a{\xff=\"foo\"} 1\n",
err: "no token found", err: "expected label name, got \"INVALID\"",
}, },
{ {
input: "a{b=\"\xff\"} 1\n", input: "a{b=\"\xff\"} 1\n",
@ -180,20 +230,22 @@ func TestParseErrors(t *testing.T) {
}, },
{ {
input: "something_weird{problem=\"", input: "something_weird{problem=\"",
err: "no token found", err: "expected label value, got \"INVALID\"",
}, },
{ {
input: "empty_label_name{=\"\"} 0", input: "empty_label_name{=\"\"} 0",
err: "no token found", err: "expected label name, got \"EQUAL\"",
}, },
} }
for _, c := range cases { for i, c := range cases {
p := New([]byte(c.input)) p := New([]byte(c.input))
for p.Next() { var err error
for err == nil {
_, err = p.Next()
} }
require.NotNil(t, p.Err()) require.NotNil(t, err)
require.Equal(t, c.err, p.Err().Error()) require.Equal(t, c.err, err.Error(), "test %d", i)
} }
} }
@ -220,34 +272,36 @@ func TestNullByteHandling(t *testing.T) {
}, },
{ {
input: "a{b=\x00\"ssss\"} 1\n", input: "a{b=\x00\"ssss\"} 1\n",
err: "no token found", err: "expected label value, got \"INVALID\"",
}, },
{ {
input: "a{b=\"\x00", input: "a{b=\"\x00",
err: "no token found", err: "expected label value, got \"INVALID\"",
}, },
{ {
input: "a{b\x00=\"hiih\"} 1", input: "a{b\x00=\"hiih\"} 1",
err: "no token found", err: "expected equal, got \"INVALID\"",
}, },
{ {
input: "a\x00{b=\"ddd\"} 1", input: "a\x00{b=\"ddd\"} 1",
err: "no token found", err: "expected value after metric, got \"MNAME\"",
}, },
} }
for _, c := range cases { for i, c := range cases {
p := New([]byte(c.input)) p := New([]byte(c.input))
for p.Next() { var err error
for err == nil {
_, err = p.Next()
} }
if c.err == "" { if c.err == "" {
require.NoError(t, p.Err()) require.Equal(t, io.EOF, err, "test %d", i)
continue continue
} }
require.Error(t, p.Err()) require.Error(t, err)
require.Equal(t, c.err, p.Err().Error()) require.Equal(t, c.err, err.Error(), "test %d", i)
} }
} }
@ -274,13 +328,21 @@ func BenchmarkParse(b *testing.B) {
for i := 0; i < b.N; i += testdataSampleCount { for i := 0; i < b.N; i += testdataSampleCount {
p := New(buf) p := New(buf)
for p.Next() && i < b.N { Outer:
m, _, _ := p.At() for i < b.N {
t, err := p.Next()
total += len(m) switch t {
i++ case EntryInvalid:
if err == io.EOF {
break Outer
}
b.Fatal(err)
case EntrySeries:
m, _, _ := p.Series()
total += len(m)
i++
}
} }
require.NoError(b, p.Err())
} }
_ = total _ = total
}) })
@ -294,16 +356,25 @@ func BenchmarkParse(b *testing.B) {
for i := 0; i < b.N; i += testdataSampleCount { for i := 0; i < b.N; i += testdataSampleCount {
p := New(buf) p := New(buf)
for p.Next() && i < b.N { Outer:
m, _, _ := p.At() for i < b.N {
t, err := p.Next()
switch t {
case EntryInvalid:
if err == io.EOF {
break Outer
}
b.Fatal(err)
case EntrySeries:
m, _, _ := p.Series()
res := make(labels.Labels, 0, 5) res := make(labels.Labels, 0, 5)
p.Metric(&res) p.Metric(&res)
total += len(m) total += len(m)
i++ i++
}
} }
require.NoError(b, p.Err())
} }
_ = total _ = total
}) })
@ -318,16 +389,25 @@ func BenchmarkParse(b *testing.B) {
for i := 0; i < b.N; i += testdataSampleCount { for i := 0; i < b.N; i += testdataSampleCount {
p := New(buf) p := New(buf)
for p.Next() && i < b.N { Outer:
m, _, _ := p.At() for i < b.N {
t, err := p.Next()
switch t {
case EntryInvalid:
if err == io.EOF {
break Outer
}
b.Fatal(err)
case EntrySeries:
m, _, _ := p.Series()
p.Metric(&res) p.Metric(&res)
total += len(m) total += len(m)
i++ i++
res = res[:0] res = res[:0]
}
} }
require.NoError(b, p.Err())
} }
_ = total _ = total
}) })
@ -361,7 +441,6 @@ func BenchmarkParse(b *testing.B) {
}) })
} }
} }
func BenchmarkGzip(b *testing.B) { func BenchmarkGzip(b *testing.B) {
for _, fn := range []string{"testdata.txt", "testdata.nometa.txt"} { for _, fn := range []string{"testdata.txt", "testdata.nometa.txt"} {
b.Run(fn, func(b *testing.B) { b.Run(fn, func(b *testing.B) {
prompb/README.md (new file)
@ -0,0 +1,14 @@
The compiled protobufs are version-controlled and you won't normally need to
re-compile them when building Prometheus.
If, however, you have modified the definitions and need to re-compile them, run
`./scripts/genproto.sh` from the parent directory.
In order for the script to run, you'll need `protoc` (version 3.5) in your
PATH, and the following Go packages installed:
- github.com/gogo/protobuf
- github.com/gogo/protobuf/protoc-gen-gogofast
- github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/
- github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
- golang.org/x/tools/cmd/goimports
@ -1401,9 +1401,9 @@ func scalarBinop(op ItemType, lhs, rhs float64) float64 {
case itemDIV: case itemDIV:
return lhs / rhs return lhs / rhs
case itemPOW: case itemPOW:
return math.Pow(float64(lhs), float64(rhs)) return math.Pow(lhs, rhs)
case itemMOD: case itemMOD:
return math.Mod(float64(lhs), float64(rhs)) return math.Mod(lhs, rhs)
case itemEQL: case itemEQL:
return btos(lhs == rhs) return btos(lhs == rhs)
case itemNEQ: case itemNEQ:
@ -1432,9 +1432,9 @@ func vectorElemBinop(op ItemType, lhs, rhs float64) (float64, bool) {
case itemDIV: case itemDIV:
return lhs / rhs, true return lhs / rhs, true
case itemPOW: case itemPOW:
return math.Pow(float64(lhs), float64(rhs)), true return math.Pow(lhs, rhs), true
case itemMOD: case itemMOD:
return math.Mod(float64(lhs), float64(rhs)), true return math.Mod(lhs, rhs), true
case itemEQL: case itemEQL:
return lhs, lhs == rhs return lhs, lhs == rhs
case itemNEQ: case itemNEQ:
@ -1510,7 +1510,7 @@ func (ev *evaluator) aggregation(op ItemType, grouping []string, without bool, p
lb.Del(labels.MetricName) lb.Del(labels.MetricName)
} }
if op == itemCountValues { if op == itemCountValues {
lb.Set(valueLabel, strconv.FormatFloat(float64(s.V), 'f', -1, 64)) lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64))
} }
var ( var (
@ -1578,12 +1578,12 @@ func (ev *evaluator) aggregation(op ItemType, grouping []string, without bool, p
group.groupCount++ group.groupCount++
case itemMax: case itemMax:
if group.value < s.V || math.IsNaN(float64(group.value)) { if group.value < s.V || math.IsNaN(group.value) {
group.value = s.V group.value = s.V
} }
case itemMin: case itemMin:
if group.value > s.V || math.IsNaN(float64(group.value)) { if group.value > s.V || math.IsNaN(group.value) {
group.value = s.V group.value = s.V
} }
@ -1596,7 +1596,7 @@ func (ev *evaluator) aggregation(op ItemType, grouping []string, without bool, p
group.groupCount++ group.groupCount++
case itemTopK: case itemTopK:
if int64(len(group.heap)) < k || group.heap[0].V < s.V || math.IsNaN(float64(group.heap[0].V)) { if int64(len(group.heap)) < k || group.heap[0].V < s.V || math.IsNaN(group.heap[0].V) {
if int64(len(group.heap)) == k { if int64(len(group.heap)) == k {
heap.Pop(&group.heap) heap.Pop(&group.heap)
} }
@ -1607,7 +1607,7 @@ func (ev *evaluator) aggregation(op ItemType, grouping []string, without bool, p
} }
case itemBottomK: case itemBottomK:
if int64(len(group.reverseHeap)) < k || group.reverseHeap[0].V > s.V || math.IsNaN(float64(group.reverseHeap[0].V)) { if int64(len(group.reverseHeap)) < k || group.reverseHeap[0].V > s.V || math.IsNaN(group.reverseHeap[0].V) {
if int64(len(group.reverseHeap)) == k { if int64(len(group.reverseHeap)) == k {
heap.Pop(&group.reverseHeap) heap.Pop(&group.reverseHeap)
} }
@ -1635,12 +1635,12 @@ func (ev *evaluator) aggregation(op ItemType, grouping []string, without bool, p
aggr.value = float64(aggr.groupCount) aggr.value = float64(aggr.groupCount)
case itemStdvar: case itemStdvar:
avg := float64(aggr.value) / float64(aggr.groupCount) avg := aggr.value / float64(aggr.groupCount)
aggr.value = float64(aggr.valuesSquaredSum)/float64(aggr.groupCount) - avg*avg aggr.value = aggr.valuesSquaredSum/float64(aggr.groupCount) - avg*avg
case itemStddev: case itemStddev:
avg := float64(aggr.value) / float64(aggr.groupCount) avg := aggr.value / float64(aggr.groupCount)
aggr.value = math.Sqrt(float64(aggr.valuesSquaredSum)/float64(aggr.groupCount) - avg*avg) aggr.value = math.Sqrt(aggr.valuesSquaredSum/float64(aggr.groupCount) - avg*avg)
case itemTopK: case itemTopK:
// The heap keeps the lowest value on top, so reverse it. // The heap keeps the lowest value on top, so reverse it.
@ -299,7 +299,7 @@ func funcClampMax(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
for _, el := range vec { for _, el := range vec {
enh.out = append(enh.out, Sample{ enh.out = append(enh.out, Sample{
Metric: enh.dropMetricName(el.Metric), Metric: enh.dropMetricName(el.Metric),
Point: Point{V: math.Min(max, float64(el.V))}, Point: Point{V: math.Min(max, el.V)},
}) })
} }
return enh.out return enh.out
@ -312,7 +312,7 @@ func funcClampMin(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
for _, el := range vec { for _, el := range vec {
enh.out = append(enh.out, Sample{ enh.out = append(enh.out, Sample{
Metric: enh.dropMetricName(el.Metric), Metric: enh.dropMetricName(el.Metric),
Point: Point{V: math.Max(min, float64(el.V))}, Point: Point{V: math.Max(min, el.V)},
}) })
} }
return enh.out return enh.out
@ -331,7 +331,7 @@ func funcRound(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
toNearestInverse := 1.0 / toNearest toNearestInverse := 1.0 / toNearest
for _, el := range vec { for _, el := range vec {
v := math.Floor(float64(el.V)*toNearestInverse+0.5) / toNearestInverse v := math.Floor(el.V*toNearestInverse+0.5) / toNearestInverse
enh.out = append(enh.out, Sample{ enh.out = append(enh.out, Sample{
Metric: enh.dropMetricName(el.Metric), Metric: enh.dropMetricName(el.Metric),
Point: Point{V: v}, Point: Point{V: v},
@ -392,7 +392,7 @@ func funcMaxOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector
return aggrOverTime(vals, enh, func(values []Point) float64 { return aggrOverTime(vals, enh, func(values []Point) float64 {
max := math.Inf(-1) max := math.Inf(-1)
for _, v := range values { for _, v := range values {
max = math.Max(max, float64(v.V)) max = math.Max(max, v.V)
} }
return max return max
}) })
@ -403,7 +403,7 @@ func funcMinOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector
return aggrOverTime(vals, enh, func(values []Point) float64 { return aggrOverTime(vals, enh, func(values []Point) float64 {
min := math.Inf(1) min := math.Inf(1)
for _, v := range values { for _, v := range values {
min = math.Min(min, float64(v.V)) min = math.Min(min, v.V)
} }
return min return min
}) })
@ -451,7 +451,7 @@ func funcStddevOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vec
count++ count++
} }
avg := sum / count avg := sum / count
return math.Sqrt(float64(squaredSum/count - avg*avg)) return math.Sqrt(squaredSum/count - avg*avg)
}) })
} }
@ -698,7 +698,7 @@ func funcChanges(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
prev := samples.Points[0].V prev := samples.Points[0].V
for _, sample := range samples.Points[1:] { for _, sample := range samples.Points[1:] {
current := sample.V current := sample.V
if current != prev && !(math.IsNaN(float64(current)) && math.IsNaN(float64(prev))) { if current != prev && !(math.IsNaN(current) && math.IsNaN(prev)) {
changes++ changes++
} }
prev = current prev = current
@ -727,7 +727,7 @@ func funcLabelReplace(vals []Value, args Expressions, enh *EvalNodeHelper) Vecto
if err != nil { if err != nil {
panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr)) panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr))
} }
if !model.LabelNameRE.MatchString(string(dst)) { if !model.LabelNameRE.MatchString(dst) {
panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst)) panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst))
} }
enh.dmn = make(map[uint64]labels.Labels, len(enh.out)) enh.dmn = make(map[uint64]labels.Labels, len(enh.out))
@ -1217,7 +1217,7 @@ func (s vectorByValueHeap) Len() int {
} }
func (s vectorByValueHeap) Less(i, j int) bool { func (s vectorByValueHeap) Less(i, j int) bool {
if math.IsNaN(float64(s[i].V)) { if math.IsNaN(s[i].V) {
return true return true
} }
return s[i].V < s[j].V return s[i].V < s[j].V
@ -1246,7 +1246,7 @@ func (s vectorByReverseValueHeap) Len() int {
} }
func (s vectorByReverseValueHeap) Less(i, j int) bool { func (s vectorByReverseValueHeap) Less(i, j int) bool {
if math.IsNaN(float64(s[i].V)) { if math.IsNaN(s[i].V) {
return true return true
} }
return s[i].V > s[j].V return s[i].V > s[j].V
@ -104,7 +104,7 @@ func bucketQuantile(q float64, buckets buckets) float64 {
count -= buckets[b-1].count count -= buckets[b-1].count
rank -= buckets[b-1].count rank -= buckets[b-1].count
} }
return bucketStart + (bucketEnd-bucketStart)*float64(rank/count) return bucketStart + (bucketEnd-bucketStart)*(rank/count)
} }
// The assumption that bucket counts increase monotonically with increasing // The assumption that bucket counts increase monotonically with increasing
@ -179,5 +179,5 @@ func quantile(q float64, values vectorByValueHeap) float64 {
upperIndex := math.Min(n-1, lowerIndex+1) upperIndex := math.Min(n-1, lowerIndex+1)
weight := rank - math.Floor(rank) weight := rank - math.Floor(rank)
return float64(values[int(lowerIndex)].V)*(1-weight) + float64(values[int(upperIndex)].V)*weight return values[int(lowerIndex)].V*(1-weight) + values[int(upperIndex)].V*weight
} }
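For reference, the linear interpolation both quantile paths implement can be written as follows, with $v_0 \le \dots \le v_{n-1}$ the sorted sample values:

$$r = q\,(n-1), \qquad w = r - \lfloor r \rfloor, \qquad \mathrm{quantile}(q) = v_{\lfloor r \rfloor}\,(1-w) + v_{\min(n-1,\,\lfloor r \rfloor + 1)}\,w$$

and, in the histogram case, within the bucket selected by the rank:

$$\mathrm{bucketStart} + (\mathrm{bucketEnd} - \mathrm{bucketStart}) \cdot \frac{\mathrm{rank}}{\mathrm{count}}$$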
@ -160,7 +160,7 @@ func (t *Test) parseEval(lines []string, i int) (int, *evalCmd, error) {
} }
ts := testStartTime.Add(time.Duration(offset)) ts := testStartTime.Add(time.Duration(offset))
cmd := newEvalCmd(expr, ts) cmd := newEvalCmd(expr, ts, i+1)
switch mod { switch mod {
case "ordered": case "ordered":
cmd.ordered = true cmd.ordered = true
@ -303,6 +303,7 @@ func (cmd *loadCmd) append(a storage.Appender) error {
type evalCmd struct { type evalCmd struct {
expr string expr string
start time.Time start time.Time
line int
fail, ordered bool fail, ordered bool
@ -319,10 +320,11 @@ func (e entry) String() string {
return fmt.Sprintf("%d: %s", e.pos, e.vals) return fmt.Sprintf("%d: %s", e.pos, e.vals)
} }
func newEvalCmd(expr string, start time.Time) *evalCmd { func newEvalCmd(expr string, start time.Time, line int) *evalCmd {
return &evalCmd{ return &evalCmd{
expr: expr, expr: expr,
start: start, start: start,
line: line,
metrics: map[uint64]labels.Labels{}, metrics: map[uint64]labels.Labels{},
expected: map[uint64]entry{}, expected: map[uint64]entry{},
@ -437,11 +439,11 @@ func (t *Test) exec(tc testCommand) error {
if cmd.fail { if cmd.fail {
return nil return nil
} }
return fmt.Errorf("error evaluating query %q: %s", cmd.expr, res.Err) return fmt.Errorf("error evaluating query %q (line %d): %s", cmd.expr, cmd.line, res.Err)
} }
defer q.Close() defer q.Close()
if res.Err == nil && cmd.fail { if res.Err == nil && cmd.fail {
return fmt.Errorf("expected error evaluating query but got none") return fmt.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
} }
err := cmd.compareResult(res.Value) err := cmd.compareResult(res.Value)
@ -454,7 +456,7 @@ func (t *Test) exec(tc testCommand) error {
q, _ = t.queryEngine.NewRangeQuery(t.storage, cmd.expr, cmd.start.Add(-time.Minute), cmd.start.Add(time.Minute), time.Minute) q, _ = t.queryEngine.NewRangeQuery(t.storage, cmd.expr, cmd.start.Add(-time.Minute), cmd.start.Add(time.Minute), time.Minute)
rangeRes := q.Exec(t.context) rangeRes := q.Exec(t.context)
if rangeRes.Err != nil { if rangeRes.Err != nil {
return fmt.Errorf("error evaluating query %q in range mode: %s", cmd.expr, rangeRes.Err) return fmt.Errorf("error evaluating query %q (line %d) in range mode: %s", cmd.expr, cmd.line, rangeRes.Err)
} }
defer q.Close() defer q.Close()
if cmd.ordered { if cmd.ordered {
@ -477,7 +479,7 @@ func (t *Test) exec(tc testCommand) error {
err = cmd.compareResult(vec) err = cmd.compareResult(vec)
} }
if err != nil { if err != nil {
return fmt.Errorf("error in %s %s rande mode: %s", cmd, cmd.expr, err) return fmt.Errorf("error in %s %s (line %d) rande mode: %s", cmd, cmd.expr, cmd.line, err)
} }
default: default:
@ -0,0 +1,5 @@
groups:
- name: test
rules:
- record: job:http_requests:rate5m
expr: sum by (job)(rate(http_requests_total[5m]))
@ -287,6 +287,7 @@ func TestCopyState(t *testing.T) {
} }
func TestUpdate(t *testing.T) { func TestUpdate(t *testing.T) {
files := []string{"fixtures/rules.yaml"}
expected := map[string]labels.Labels{ expected := map[string]labels.Labels{
"test": labels.FromStrings("name", "value"), "test": labels.FromStrings("name", "value"),
} }
@ -296,15 +297,16 @@ func TestUpdate(t *testing.T) {
}) })
ruleManager.Run() ruleManager.Run()
err := ruleManager.Update(0, nil) err := ruleManager.Update(10*time.Second, files)
testutil.Ok(t, err) testutil.Ok(t, err)
testutil.Assert(t, len(ruleManager.groups) > 0, "expected non-empty rule groups")
for _, g := range ruleManager.groups { for _, g := range ruleManager.groups {
g.seriesInPreviousEval = []map[string]labels.Labels{ g.seriesInPreviousEval = []map[string]labels.Labels{
expected, expected,
} }
} }
err = ruleManager.Update(0, nil) err = ruleManager.Update(10*time.Second, files)
testutil.Ok(t, err) testutil.Ok(t, err)
for _, g := range ruleManager.groups { for _, g := range ruleManager.groups {
for _, actual := range g.seriesInPreviousEval { for _, actual := range g.seriesInPreviousEval {
@ -33,7 +33,6 @@ type Appendable interface {
// NewManager is the Manager constructor // NewManager is the Manager constructor
func NewManager(logger log.Logger, app Appendable) *Manager { func NewManager(logger log.Logger, app Appendable) *Manager {
return &Manager{ return &Manager{
append: app, append: app,
logger: logger, logger: logger,
@ -16,13 +16,13 @@ package scrape
import ( import (
"fmt" "fmt"
"testing" "testing"
"time"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/util/testutil" "github.com/prometheus/prometheus/util/testutil"
yaml "gopkg.in/yaml.v2"
) )
func mustNewRegexp(s string) config.Regexp { func mustNewRegexp(s string) config.Regexp {
@ -229,39 +229,41 @@ func TestPopulateLabels(t *testing.T) {
func TestManagerReloadNoChange(t *testing.T) { func TestManagerReloadNoChange(t *testing.T) {
tsetName := "test" tsetName := "test"
reloadCfg := &config.Config{ cfgText := `
ScrapeConfigs: []*config.ScrapeConfig{ scrape_configs:
&config.ScrapeConfig{ - job_name: '` + tsetName + `'
ScrapeInterval: model.Duration(3 * time.Second), static_configs:
ScrapeTimeout: model.Duration(2 * time.Second), - targets: ["foo:9090"]
}, - targets: ["bar:9090"]
}, `
cfg := &config.Config{}
if err := yaml.UnmarshalStrict([]byte(cfgText), cfg); err != nil {
t.Fatalf("Unable to load YAML config cfgYaml: %s", err)
} }
scrapeManager := NewManager(nil, nil) scrapeManager := NewManager(nil, nil)
scrapeManager.scrapeConfigs[tsetName] = reloadCfg.ScrapeConfigs[0] // Load the current config.
scrapeManager.ApplyConfig(cfg)
// As reload never happens, new loop should never be called. // As reload never happens, new loop should never be called.
newLoop := func(_ *Target, s scraper, _ int, _ bool, _ []*config.RelabelConfig) loop { newLoop := func(_ *Target, s scraper, _ int, _ bool, _ []*config.RelabelConfig) loop {
t.Fatal("reload happened") t.Fatal("reload happened")
return nil return nil
} }
sp := &scrapePool{ sp := &scrapePool{
appendable: &nopAppendable{}, appendable: &nopAppendable{},
targets: map[uint64]*Target{}, targets: map[uint64]*Target{},
loops: map[uint64]loop{ loops: map[uint64]loop{
1: &scrapeLoop{}, 1: &testLoop{},
}, },
newLoop: newLoop, newLoop: newLoop,
logger: nil, logger: nil,
config: reloadCfg.ScrapeConfigs[0], config: cfg.ScrapeConfigs[0],
} }
scrapeManager.scrapePools = map[string]*scrapePool{ scrapeManager.scrapePools = map[string]*scrapePool{
tsetName: sp, tsetName: sp,
} }
targets := map[string][]*targetgroup.Group{ scrapeManager.ApplyConfig(cfg)
tsetName: []*targetgroup.Group{},
}
scrapeManager.reload(targets)
} }
@ -161,6 +161,10 @@ func newScrapePool(cfg *config.ScrapeConfig, app Appendable, logger log.Logger)
logger: logger, logger: logger,
} }
sp.newLoop = func(t *Target, s scraper, limit int, honor bool, mrc []*config.RelabelConfig) loop { sp.newLoop = func(t *Target, s scraper, limit int, honor bool, mrc []*config.RelabelConfig) loop {
// Update the targets retrieval function for metadata to a new scrape cache.
cache := newScrapeCache()
t.setMetadataStore(cache)
return newScrapeLoop( return newScrapeLoop(
ctx, ctx,
s, s,
@ -175,6 +179,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app Appendable, logger log.Logger)
} }
return appender(app, limit) return appender(app, limit)
}, },
cache,
) )
} }
@ -523,43 +528,62 @@ type scrapeCache struct {
// Parsed string to an entry with information about the actual label set // Parsed string to an entry with information about the actual label set
// and its storage reference. // and its storage reference.
entries map[string]*cacheEntry series map[string]*cacheEntry
// Cache of dropped metric strings and their iteration. The iteration must // Cache of dropped metric strings and their iteration. The iteration must
// be a pointer so we can update it without setting a new entry with an unsafe // be a pointer so we can update it without setting a new entry with an unsafe
// string in addDropped(). // string in addDropped().
dropped map[string]*uint64 droppedSeries map[string]*uint64
// seriesCur and seriesPrev store the labels of series that were seen // seriesCur and seriesPrev store the labels of series that were seen
// in the current and previous scrape. // in the current and previous scrape.
// We hold two maps and swap them out to save allocations. // We hold two maps and swap them out to save allocations.
seriesCur map[uint64]labels.Labels seriesCur map[uint64]labels.Labels
seriesPrev map[uint64]labels.Labels seriesPrev map[uint64]labels.Labels
metaMtx sync.Mutex
metadata map[string]*metaEntry
}
// metaEntry holds meta information about a metric.
type metaEntry struct {
lastIter uint64 // Last scrape iteration the entry was observed at.
typ textparse.MetricType
help string
} }
func newScrapeCache() *scrapeCache { func newScrapeCache() *scrapeCache {
return &scrapeCache{ return &scrapeCache{
entries: map[string]*cacheEntry{}, series: map[string]*cacheEntry{},
dropped: map[string]*uint64{}, droppedSeries: map[string]*uint64{},
seriesCur: map[uint64]labels.Labels{}, seriesCur: map[uint64]labels.Labels{},
seriesPrev: map[uint64]labels.Labels{}, seriesPrev: map[uint64]labels.Labels{},
metadata: map[string]*metaEntry{},
} }
} }
func (c *scrapeCache) iterDone() { func (c *scrapeCache) iterDone() {
// refCache and lsetCache may grow over time through series churn // All caches may grow over time through series churn
// or multiple string representations of the same metric. Clean up entries // or multiple string representations of the same metric. Clean up entries
// that haven't appeared in the last scrape. // that haven't appeared in the last scrape.
for s, e := range c.entries { for s, e := range c.series {
if c.iter-e.lastIter > 2 { if c.iter-e.lastIter > 2 {
delete(c.entries, s) delete(c.series, s)
} }
} }
for s, iter := range c.dropped { for s, iter := range c.droppedSeries {
if c.iter-*iter > 2 { if c.iter-*iter > 2 {
delete(c.dropped, s) delete(c.droppedSeries, s)
} }
} }
c.metaMtx.Lock()
for m, e := range c.metadata {
// Keep metadata around for 10 scrapes after its metric disappeared.
if c.iter-e.lastIter > 10 {
delete(c.metadata, m)
}
}
c.metaMtx.Unlock()
// Swap current and previous series. // Swap current and previous series.
c.seriesPrev, c.seriesCur = c.seriesCur, c.seriesPrev c.seriesPrev, c.seriesCur = c.seriesCur, c.seriesPrev
@ -573,7 +597,7 @@ func (c *scrapeCache) iterDone() {
} }
func (c *scrapeCache) get(met string) (*cacheEntry, bool) { func (c *scrapeCache) get(met string) (*cacheEntry, bool) {
e, ok := c.entries[met] e, ok := c.series[met]
if !ok { if !ok {
return nil, false return nil, false
} }
@ -585,16 +609,16 @@ func (c *scrapeCache) addRef(met string, ref uint64, lset labels.Labels, hash ui
if ref == 0 { if ref == 0 {
return return
} }
c.entries[met] = &cacheEntry{ref: ref, lastIter: c.iter, lset: lset, hash: hash} c.series[met] = &cacheEntry{ref: ref, lastIter: c.iter, lset: lset, hash: hash}
} }
func (c *scrapeCache) addDropped(met string) { func (c *scrapeCache) addDropped(met string) {
iter := c.iter iter := c.iter
c.dropped[met] = &iter c.droppedSeries[met] = &iter
} }
func (c *scrapeCache) getDropped(met string) bool { func (c *scrapeCache) getDropped(met string) bool {
iterp, ok := c.dropped[met] iterp, ok := c.droppedSeries[met]
if ok { if ok {
*iterp = c.iter *iterp = c.iter
} }
@ -615,6 +639,67 @@ func (c *scrapeCache) forEachStale(f func(labels.Labels) bool) {
} }
} }
func (c *scrapeCache) setType(metric []byte, t textparse.MetricType) {
c.metaMtx.Lock()
e, ok := c.metadata[yoloString(metric)]
if !ok {
e = &metaEntry{typ: textparse.MetricTypeUntyped}
c.metadata[string(metric)] = e
}
e.typ = t
e.lastIter = c.iter
c.metaMtx.Unlock()
}
func (c *scrapeCache) setHelp(metric, help []byte) {
c.metaMtx.Lock()
e, ok := c.metadata[yoloString(metric)]
if !ok {
e = &metaEntry{typ: textparse.MetricTypeUntyped}
c.metadata[string(metric)] = e
}
if e.help != yoloString(help) {
e.help = string(help)
}
e.lastIter = c.iter
c.metaMtx.Unlock()
}
func (c *scrapeCache) getMetadata(metric string) (MetricMetadata, bool) {
c.metaMtx.Lock()
defer c.metaMtx.Unlock()
m, ok := c.metadata[metric]
if !ok {
return MetricMetadata{}, false
}
return MetricMetadata{
Metric: metric,
Type: m.typ,
Help: m.help,
}, true
}
func (c *scrapeCache) listMetadata() []MetricMetadata {
c.metaMtx.Lock()
defer c.metaMtx.Unlock()
res := make([]MetricMetadata, 0, len(c.metadata))
for m, e := range c.metadata {
res = append(res, MetricMetadata{
Metric: m,
Type: e.typ,
Help: e.help,
})
}
return res
}
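A hedged, in-package sketch of how these pieces interact; the identifiers are unexported, so this would live inside package scrape (the scrape loop feeds the cache, and readers such as the metadata API query it):

```go
c := newScrapeCache()

// During a scrape, TYPE and HELP lines are recorded as they are parsed.
c.setType([]byte("http_requests_total"), textparse.MetricTypeCounter)
c.setHelp([]byte("http_requests_total"), []byte("Total number of HTTP requests."))

// Later, e.g. when serving the targets-metadata API, read the entry back.
if md, ok := c.getMetadata("http_requests_total"); ok {
	fmt.Println(md.Metric, md.Type, md.Help)
}
```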
func newScrapeLoop(ctx context.Context, func newScrapeLoop(ctx context.Context,
sc scraper, sc scraper,
l log.Logger, l log.Logger,
@ -622,6 +707,7 @@ func newScrapeLoop(ctx context.Context,
sampleMutator labelsMutator, sampleMutator labelsMutator,
reportSampleMutator labelsMutator, reportSampleMutator labelsMutator,
appender func() storage.Appender, appender func() storage.Appender,
cache *scrapeCache,
) *scrapeLoop { ) *scrapeLoop {
if l == nil { if l == nil {
l = log.NewNopLogger() l = log.NewNopLogger()
@ -629,10 +715,13 @@ func newScrapeLoop(ctx context.Context,
if buffers == nil { if buffers == nil {
buffers = pool.New(1e3, 1e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }) buffers = pool.New(1e3, 1e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) })
} }
if cache == nil {
cache = newScrapeCache()
}
sl := &scrapeLoop{ sl := &scrapeLoop{
scraper: sc, scraper: sc,
buffers: buffers, buffers: buffers,
cache: newScrapeCache(), cache: cache,
appender: appender, appender: appender,
sampleMutator: sampleMutator, sampleMutator: sampleMutator,
reportSampleMutator: reportSampleMutator, reportSampleMutator: reportSampleMutator,
@ -830,11 +919,29 @@ func (sl *scrapeLoop) append(b []byte, ts time.Time) (total, added int, err erro
var sampleLimitErr error var sampleLimitErr error
loop: loop:
for p.Next() { for {
var et textparse.Entry
if et, err = p.Next(); err != nil {
if err == io.EOF {
err = nil
}
break
}
switch et {
case textparse.EntryType:
sl.cache.setType(p.Type())
continue
case textparse.EntryHelp:
sl.cache.setHelp(p.Help())
continue
case textparse.EntryComment:
continue
default:
}
total++ total++
t := defTime t := defTime
met, tp, v := p.At() met, tp, v := p.Series()
if tp != nil { if tp != nil {
t = *tp t = *tp
} }
@ -931,10 +1038,10 @@ loop:
} }
added++ added++
} }
if err == nil {
err = p.Err()
}
if sampleLimitErr != nil { if sampleLimitErr != nil {
if err == nil {
err = sampleLimitErr
}
// We only want to increment this once per scrape, so this is Inc'd outside the loop. // We only want to increment this once per scrape, so this is Inc'd outside the loop.
targetScrapeSampleLimit.Inc() targetScrapeSampleLimit.Inc()
} }
@ -37,6 +37,7 @@ import (
"github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/textparse"
"github.com/prometheus/prometheus/pkg/timestamp" "github.com/prometheus/prometheus/pkg/timestamp"
"github.com/prometheus/prometheus/pkg/value" "github.com/prometheus/prometheus/pkg/value"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
@ -306,7 +307,7 @@ func TestScrapePoolAppender(t *testing.T) {
app := &nopAppendable{} app := &nopAppendable{}
sp := newScrapePool(cfg, app, nil) sp := newScrapePool(cfg, app, nil)
loop := sp.newLoop(nil, nil, 0, false, nil) loop := sp.newLoop(&Target{}, nil, 0, false, nil)
appl, ok := loop.(*scrapeLoop) appl, ok := loop.(*scrapeLoop)
if !ok { if !ok {
t.Fatalf("Expected scrapeLoop but got %T", loop) t.Fatalf("Expected scrapeLoop but got %T", loop)
@ -321,7 +322,7 @@ func TestScrapePoolAppender(t *testing.T) {
t.Fatalf("Expected base appender but got %T", tl.Appender) t.Fatalf("Expected base appender but got %T", tl.Appender)
} }
loop = sp.newLoop(nil, nil, 100, false, nil) loop = sp.newLoop(&Target{}, nil, 100, false, nil)
appl, ok = loop.(*scrapeLoop) appl, ok = loop.(*scrapeLoop)
if !ok { if !ok {
t.Fatalf("Expected scrapeLoop but got %T", loop) t.Fatalf("Expected scrapeLoop but got %T", loop)
@ -387,7 +388,7 @@ func TestScrapeLoopStopBeforeRun(t *testing.T) {
nil, nil, nil, nil,
nopMutator, nopMutator,
nopMutator, nopMutator,
nil, nil, nil,
) )
// The scrape pool synchronizes on stopping scrape loops. However, new scrape // The scrape pool synchronizes on stopping scrape loops. However, new scrape
@ -450,6 +451,7 @@ func TestScrapeLoopStop(t *testing.T) {
nopMutator, nopMutator,
nopMutator, nopMutator,
app, app,
nil,
) )
// Terminate loop after 2 scrapes. // Terminate loop after 2 scrapes.
@ -514,6 +516,7 @@ func TestScrapeLoopRun(t *testing.T) {
nopMutator, nopMutator,
nopMutator, nopMutator,
app, app,
nil,
) )
// The loop must terminate during the initial offset if the context // The loop must terminate during the initial offset if the context
@ -558,6 +561,7 @@ func TestScrapeLoopRun(t *testing.T) {
nopMutator, nopMutator,
nopMutator, nopMutator,
app, app,
nil,
) )
go func() { go func() {
@ -590,6 +594,51 @@ func TestScrapeLoopRun(t *testing.T) {
} }
} }
func TestScrapeLoopMetadata(t *testing.T) {
var (
signal = make(chan struct{})
scraper = &testScraper{}
cache = newScrapeCache()
)
defer close(signal)
ctx, cancel := context.WithCancel(context.Background())
sl := newScrapeLoop(ctx,
scraper,
nil, nil,
nopMutator,
nopMutator,
func() storage.Appender { return nopAppender{} },
cache,
)
defer cancel()
total, _, err := sl.append([]byte(`
# TYPE test_metric counter
# HELP test_metric some help text
# other comment
test_metric 1
# TYPE test_metric_no_help gauge
# HELP test_metric_no_type other help text`), time.Now())
testutil.Ok(t, err)
testutil.Equals(t, 1, total)
md, ok := cache.getMetadata("test_metric")
testutil.Assert(t, ok, "expected metadata to be present")
testutil.Assert(t, textparse.MetricTypeCounter == md.Type, "unexpected metric type")
testutil.Equals(t, "some help text", md.Help)
md, ok = cache.getMetadata("test_metric_no_help")
testutil.Assert(t, ok, "expected metadata to be present")
testutil.Assert(t, textparse.MetricTypeGauge == md.Type, "unexpected metric type")
testutil.Equals(t, "", md.Help)
md, ok = cache.getMetadata("test_metric_no_type")
testutil.Assert(t, ok, "expected metadata to be present")
testutil.Assert(t, textparse.MetricTypeUntyped == md.Type, "unexpected metric type")
testutil.Equals(t, "other help text", md.Help)
}
func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) { func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
appender := &collectResultAppender{} appender := &collectResultAppender{}
var ( var (
@ -606,6 +655,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
nopMutator, nopMutator,
nopMutator, nopMutator,
app, app,
nil,
) )
// Succeed once, several failures, then stop. // Succeed once, several failures, then stop.
numScrapes := 0 numScrapes := 0
@ -663,6 +713,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
nopMutator, nopMutator,
nopMutator, nopMutator,
app, app,
nil,
) )
// Succeed once, several failures, then stop. // Succeed once, several failures, then stop.
@ -766,6 +817,7 @@ func TestScrapeLoopAppend(t *testing.T) {
return mutateReportSampleLabels(l, discoveryLabels) return mutateReportSampleLabels(l, discoveryLabels)
}, },
func() storage.Appender { return app }, func() storage.Appender { return app },
nil,
) )
now := time.Now() now := time.Now()
@ -804,6 +856,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
nopMutator, nopMutator,
nopMutator, nopMutator,
func() storage.Appender { return app }, func() storage.Appender { return app },
nil,
) )
// Get the value of the Counter before performing the append. // Get the value of the Counter before performing the append.
@ -863,6 +916,7 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) {
nopMutator, nopMutator,
nopMutator, nopMutator,
func() storage.Appender { return capp }, func() storage.Appender { return capp },
nil,
) )
now := time.Now() now := time.Now()
@ -901,6 +955,7 @@ func TestScrapeLoopAppendStaleness(t *testing.T) {
nopMutator, nopMutator,
nopMutator, nopMutator,
func() storage.Appender { return app }, func() storage.Appender { return app },
nil,
) )
now := time.Now() now := time.Now()
@ -945,6 +1000,7 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
nopMutator, nopMutator,
nopMutator, nopMutator,
func() storage.Appender { return app }, func() storage.Appender { return app },
nil,
) )
now := time.Now() now := time.Now()
@ -983,6 +1039,7 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) {
nopMutator, nopMutator,
nopMutator, nopMutator,
app, app,
nil,
) )
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
@ -1011,6 +1068,7 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
nopMutator, nopMutator,
nopMutator, nopMutator,
app, app,
nil,
) )
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
@ -1056,6 +1114,7 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T
nopMutator, nopMutator,
nopMutator, nopMutator,
func() storage.Appender { return app }, func() storage.Appender { return app },
nil,
) )
now := time.Unix(1, 0) now := time.Unix(1, 0)
@ -1088,6 +1147,7 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) {
maxTime: timestamp.FromTime(time.Now().Add(10 * time.Minute)), maxTime: timestamp.FromTime(time.Now().Add(10 * time.Minute)),
} }
}, },
nil,
) )
now := time.Now().Add(20 * time.Minute) now := time.Now().Add(20 * time.Minute)
@ -29,6 +29,7 @@ import (
"github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/relabel" "github.com/prometheus/prometheus/pkg/relabel"
"github.com/prometheus/prometheus/pkg/textparse"
"github.com/prometheus/prometheus/pkg/value" "github.com/prometheus/prometheus/pkg/value"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
) )
@ -56,6 +57,7 @@ type Target struct {
lastError error lastError error
lastScrape time.Time lastScrape time.Time
health TargetHealth health TargetHealth
metadata metricMetadataStore
} }
// NewTarget creates a reasonably configured target for querying. // NewTarget creates a reasonably configured target for querying.
@ -72,6 +74,45 @@ func (t *Target) String() string {
return t.URL().String() return t.URL().String()
} }
type metricMetadataStore interface {
listMetadata() []MetricMetadata
getMetadata(metric string) (MetricMetadata, bool)
}
// MetricMetadata is a piece of metadata for a metric.
type MetricMetadata struct {
Metric string
Type textparse.MetricType
Help string
}
func (t *Target) MetadataList() []MetricMetadata {
t.mtx.RLock()
defer t.mtx.RUnlock()
if t.metadata == nil {
return nil
}
return t.metadata.listMetadata()
}
// Metadata returns type and help metadata for the given metric.
func (t *Target) Metadata(metric string) (MetricMetadata, bool) {
t.mtx.RLock()
defer t.mtx.RUnlock()
if t.metadata == nil {
return MetricMetadata{}, false
}
return t.metadata.getMetadata(metric)
}
func (t *Target) setMetadataStore(s metricMetadataStore) {
t.mtx.Lock()
defer t.mtx.Unlock()
t.metadata = s
}
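A sketch of the read path these accessors enable, assuming `mgr` is the scrape manager (`TargetsActive` being the same accessor the API handler below relies on):

```go
for _, t := range mgr.TargetsActive() {
	if md, ok := t.Metadata("go_goroutines"); ok {
		fmt.Println(t.Labels(), md.Type, md.Help)
	}
}
```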
// hash returns an identifying hash for the target. // hash returns an identifying hash for the target.
func (t *Target) hash() uint64 { func (t *Target) hash() uint64 {
h := fnv.New64a() h := fnv.New64a()
@ -15,6 +15,7 @@ package remote
import ( import (
"fmt" "fmt"
"io"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"sort" "sort"
@ -28,9 +29,12 @@ import (
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
) )
// decodeReadLimit is the maximum size of a read request body in bytes.
const decodeReadLimit = 32 * 1024 * 1024
// DecodeReadRequest reads a remote.Request from a http.Request. // DecodeReadRequest reads a remote.Request from a http.Request.
func DecodeReadRequest(r *http.Request) (*prompb.ReadRequest, error) { func DecodeReadRequest(r *http.Request) (*prompb.ReadRequest, error) {
compressed, err := ioutil.ReadAll(r.Body) compressed, err := ioutil.ReadAll(io.LimitReader(r.Body, decodeReadLimit))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -35,6 +35,7 @@ import (
"github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/textparse"
"github.com/prometheus/prometheus/pkg/timestamp" "github.com/prometheus/prometheus/pkg/timestamp"
"github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql"
@ -63,6 +64,7 @@ const (
errorBadData errorType = "bad_data" errorBadData errorType = "bad_data"
errorInternal errorType = "internal" errorInternal errorType = "internal"
errorUnavailable errorType = "unavailable" errorUnavailable errorType = "unavailable"
errorNotFound errorType = "not_found"
) )
var corsHeaders = map[string]string{ var corsHeaders = map[string]string{
@ -186,6 +188,7 @@ func (api *API) Register(r *route.Router) {
r.Del("/series", wrap(api.dropSeries)) r.Del("/series", wrap(api.dropSeries))
r.Get("/targets", wrap(api.targets)) r.Get("/targets", wrap(api.targets))
r.Get("/targets/metadata", wrap(api.targetMetadata))
r.Get("/alertmanagers", wrap(api.alertmanagers)) r.Get("/alertmanagers", wrap(api.alertmanagers))
r.Get("/status/config", wrap(api.serveConfig)) r.Get("/status/config", wrap(api.serveConfig))
@ -461,7 +464,6 @@ func (api *API) targets(r *http.Request) (interface{}, *apiError, func()) {
res := &TargetDiscovery{ActiveTargets: make([]*Target, len(tActive)), DroppedTargets: make([]*DroppedTarget, len(tDropped))} res := &TargetDiscovery{ActiveTargets: make([]*Target, len(tActive)), DroppedTargets: make([]*DroppedTarget, len(tDropped))}
for i, t := range tActive { for i, t := range tActive {
lastErrStr := "" lastErrStr := ""
lastErr := t.LastError() lastErr := t.LastError()
if lastErr != nil { if lastErr != nil {
@ -486,6 +488,68 @@ func (api *API) targets(r *http.Request) (interface{}, *apiError, func()) {
return res, nil, nil return res, nil, nil
} }
func (api *API) targetMetadata(r *http.Request) (interface{}, *apiError, func()) {
limit := -1
if s := r.FormValue("limit"); s != "" {
var err error
if limit, err = strconv.Atoi(s); err != nil {
return nil, &apiError{errorBadData, fmt.Errorf("limit must be a number")}, nil
}
}
matchers, err := promql.ParseMetricSelector(r.FormValue("match_target"))
if err != nil {
return nil, &apiError{errorBadData, err}, nil
}
metric := r.FormValue("metric")
var res []metricMetadata
Outer:
for _, t := range api.targetRetriever.TargetsActive() {
if limit >= 0 && len(res) >= limit {
break
}
for _, m := range matchers {
// Filter targets that don't satisfy the label matchers.
if !m.Matches(t.Labels().Get(m.Name)) {
continue Outer
}
}
// If no metric is specified, get the full list for the target.
if metric == "" {
for _, md := range t.MetadataList() {
res = append(res, metricMetadata{
Target: t.Labels(),
Metric: md.Metric,
Type: md.Type,
Help: md.Help,
})
}
continue
}
// Get metadata for the specified metric.
if md, ok := t.Metadata(metric); ok {
res = append(res, metricMetadata{
Target: t.Labels(),
Type: md.Type,
Help: md.Help,
})
}
}
if len(res) == 0 {
return nil, &apiError{errorNotFound, errors.New("specified metadata not found")}, nil
}
return res, nil, nil
}
type metricMetadata struct {
Target labels.Labels `json:"target"`
Metric string `json:"metric,omitempty"`
Type textparse.MetricType `json:"type"`
Help string `json:"help"`
}
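A hedged example of exercising the new endpoint from a client, assuming the router above is mounted at the usual `/api/v1` prefix; the parameters mirror the handler's `FormValue` reads:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Set("match_target", `{job="prometheus"}`)
	q.Set("metric", "go_goroutines")
	q.Set("limit", "10")

	resp, err := http.Get("http://localhost:9090/api/v1/targets/metadata?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body)) // data: array of {target, metric, type, help} objects
}
```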
// AlertmanagerDiscovery has all the active Alertmanagers. // AlertmanagerDiscovery has all the active Alertmanagers.
type AlertmanagerDiscovery struct { type AlertmanagerDiscovery struct {
ActiveAlertmanagers []*AlertmanagerTarget `json:"activeAlertmanagers"` ActiveAlertmanagers []*AlertmanagerTarget `json:"activeAlertmanagers"`
@ -783,6 +847,8 @@ func respondError(w http.ResponseWriter, apiErr *apiError, data interface{}) {
code = http.StatusServiceUnavailable code = http.StatusServiceUnavailable
case errorInternal: case errorInternal:
code = http.StatusInternalServerError code = http.StatusInternalServerError
case errorNotFound:
code = http.StatusNotFound
default: default:
code = http.StatusInternalServerError code = http.StatusInternalServerError
} }