Merge branch 'beorn/storage-ng-with-commit-history-cleaned-up'

Conflicts:
	Makefile
	Makefile.INCLUDE
	VERSION
	main.go
	notification/notification.go
	retrieval/target.go
	retrieval/target_test.go
	retrieval/targetmanager.go
	retrieval/targetmanager_test.go
	retrieval/targetpool.go
	retrieval/targetpool_test.go
	rules/ast/functions.go
	rules/rules_test.go
	storage/metric/interface.go
	storage/metric/tiered/curator.go
	storage/metric/tiered/end_to_end_test.go
	storage/metric/tiered/leveldb.go
	storage/metric/tiered/memory.go
	storage/metric/tiered/memory_test.go
	storage/metric/tiered/tiered.go
	storage/remote/queue_manager.go
	templates/templates.go
	templates/templates_test.go
	web/api/query.go
	web/consoles.go
	web/web.go

Change-Id: I96e6312b51e877d4434fe96c494e9558fe2e1d16
Bjoern Rabenstein committed on 2014-11-25 17:36:17 +01:00
commit 3a17aeabfd
126 changed files with 7479 additions and 15358 deletions


@@ -15,123 +15,7 @@
include ../Makefile.INCLUDE
all: dependencies-stamp
bison-stamp: bison-implementation-$(UNAME)-stamp
[ -x "$$(which bison)" ] || { echo "bison not found." ; false ; }
touch $@
bison-implementation-Darwin-stamp:
[ -x "$$(which bison)" ] || $(BREW_INSTALL) bison
touch $@
bison-implementation-Linux-stamp:
[ -x "$$(which bison)" ] || $(APT_GET_INSTALL) bison
touch $@
cache-stamp:
$(MAKE) -C cache
touch $@
cc-stamp: cc-implementation-$(UNAME)-stamp
[ -x "$$(which cc)" ] || { echo "cc not found." ; false ; }
touch $@
cc-implementation-Darwin-stamp:
[ -x "$$(which cc)" ] || { echo "Install XCode?" ; false ; }
touch $@
cc-implementation-Linux-stamp:
[ -x "$$(which cc)" ] || $(APT_GET_INSTALL) build-essential
touch $@
dependencies-stamp: cache-stamp cc-stamp leveldb-stamp snappy-stamp godns-stamp
touch $@
goprotobuf-protoc-gen-go-stamp: protoc-stamp goprotobuf-stamp
$(GO_GET) code.google.com/p/goprotobuf/protoc-gen-go $(THIRD_PARTY_BUILD_OUTPUT)
touch $@
goprotobuf-stamp: protoc-stamp
$(GO_GET) code.google.com/p/goprotobuf/proto $(THIRD_PARTY_BUILD_OUTPUT)
touch $@
godns-stamp:
$(GO_GET) github.com/miekg/dns $(THIRD_PARTY_BUILD_OUTPUT)
touch $@
leveldb-stamp: cache-stamp cache/leveldb-$(LEVELDB_VERSION).tar.gz cc-stamp rsync-stamp snappy-stamp
tar xzvf cache/leveldb-$(LEVELDB_VERSION).tar.gz -C dirty $(THIRD_PARTY_BUILD_OUTPUT)
cd dirty/leveldb-$(LEVELDB_VERSION) && CFLAGS="$(CFLAGS) -lsnappy" CXXFLAGS="$(CXXFLAGS) -lsnappy $(LDFLAGS)" LDFLAGS="-lsnappy $(LDFLAGS)" bash -x ./build_detect_platform build_config.mk ./
# The test that LevelDB uses to test for Snappy is naive and
# does not respect LDFLAGS. :-(
CFLAGS="$(CFLAGS) -lsnappy" CXXFLAGS="$(CXXFLAGS) -lsnappy $(LDFLAGS)" LDFLAGS="-lsnappy $(LDFLAGS)" $(MAKE) -C dirty/leveldb-$(LEVELDB_VERSION) $(THIRD_PARTY_BUILD_OUTPUT)
rsync -av "dirty/leveldb-$(LEVELDB_VERSION)/include/" "$(PREFIX)/include/" $(THIRD_PARTY_BUILD_OUTPUT)
-[ "$(UNAME)" = "Linux" ] && { rsync -av "dirty/leveldb-$(LEVELDB_VERSION)/"*.*so* "$(PREFIX)/lib/" ; } $(THIRD_PARTY_BUILD_OUTPUT) $(THIRD_PARTY_BUILD_OUTPUT)
-[ "$(UNAME)" = "Darwin" ] && { rsync -av "dirty/leveldb-$(LEVELDB_VERSION)/"*.*dylib* "$(PREFIX)/lib/" ; } $(THIRD_PARTY_BUILD_OUTPUT)
rsync -av "dirty/leveldb-$(LEVELDB_VERSION)/"*.a "$(PREFIX)/lib/" $(THIRD_PARTY_BUILD_OUTPUT)
touch $@
libunwind-stamp:
$(APT_GET_INSTALL) libunwind7
$(APT_GET_INSTALL) libunwind7-dev
touch $@
noop-target-stamp:
echo "Not doing anything."
touch $@
protoc-stamp: cache-stamp cache/protobuf-$(PROTOCOL_BUFFERS_VERSION).tar.bz2 cc-stamp
tar xjvf cache/protobuf-$(PROTOCOL_BUFFERS_VERSION).tar.bz2 -C dirty $(THIRD_PARTY_BUILD_OUTPUT)
cd dirty/protobuf-$(PROTOCOL_BUFFERS_VERSION) && ./configure --prefix="$(PREFIX)" $(THIRD_PARTY_BUILD_OUTPUT)
$(MAKE) -C dirty/protobuf-$(PROTOCOL_BUFFERS_VERSION) $(THIRD_PARTY_BUILD_OUTPUT)
$(MAKE) -C dirty/protobuf-$(PROTOCOL_BUFFERS_VERSION) install $(THIRD_PARTY_BUILD_OUTPUT)
[ -x "$$(which protoc)" ] || { echo "protoc not found." ; false ; }
touch $@
rsync-implementation-Darwin-stamp:
[ -x "$$(which rsync)" ] || $(BREW_INSTALL) rsync
touch $@
rsync-implementation-Linux-stamp:
[ -x "$$(which rsync)" ] || $(APT_GET_INSTALL) rsync
rsync-stamp: rsync-implementation-$(UNAME)-stamp
[ -x "$$(which rsync)" ] || { echo "rsync not found." ; false ; }
touch $@
snappy-stamp: cache-stamp cache/snappy-$(SNAPPY_VERSION).tar.gz cc-stamp
tar xzvf cache/snappy-$(SNAPPY_VERSION).tar.gz -C dirty $(THIRD_PARTY_BUILD_OUTPUT)
cd dirty/snappy-$(SNAPPY_VERSION) && ./configure --prefix="$(PREFIX)" $(THIRD_PARTY_BUILD_OUTPUT)
$(MAKE) -C dirty/snappy-$(SNAPPY_VERSION) $(THIRD_PARTY_BUILD_OUTPUT)
$(MAKE) -C dirty/snappy-$(SNAPPY_VERSION) install $(THIRD_PARTY_BUILD_OUTPUT)
touch $@
ifeq ($(UNAME), Linux)
stack-unwind-support-stamp: libunwind-stamp
touch $@
else
stack-unwind-support-stamp: noop-target-stamp
touch $@
endif
vim-implementation-Darwin-stamp:
[ -x "$$(which vim)" ] || $(BREW_INSTALL) vim
touch $@
vim-implementation-Linux-stamp:
[ -x "$$(which vim)" ] || $(APT_GET_INSTALL) vim
touch $@
vim-stamp: vim-implementation-$(UNAME)-stamp
touch $@
all:
clean:
$(MAKE) -C cache clean
$(MAKE) -C dirty clean
$(MAKE) -C root clean
$(MAKE) -C package clean
rm -rf *-stamp
.PHONY: clean

.build/cache/Makefile

@@ -1,48 +0,0 @@
# Copyright 2013 Prometheus Team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
.SUFFIXES:
include ../../Makefile.INCLUDE
all: populate
populate: leveldb-$(LEVELDB_VERSION).tar.gz protobuf-$(PROTOCOL_BUFFERS_VERSION).tar.bz2 snappy-$(SNAPPY_VERSION).tar.gz
leveldb-$(LEVELDB_VERSION).tar.gz: wget-stamp
$(WGET) http://leveldb.googlecode.com/files/leveldb-$(LEVELDB_VERSION).tar.gz
protobuf-$(PROTOCOL_BUFFERS_VERSION).tar.bz2: wget-stamp
$(WGET) http://protobuf.googlecode.com/files/$@
snappy-$(SNAPPY_VERSION).tar.gz: wget-stamp
$(WGET) http://snappy.googlecode.com/files/snappy-$(SNAPPY_VERSION).tar.gz
wget-implementation-Darwin-stamp:
[ -x "$$(which wget)" ] || $(BREW_INSTALL) wget
touch $@
wget-implementation-Linux-stamp:
[ -x "$$(which wget)" ] || $(APT_GET_INSTALL) wget
touch $@
wget-stamp: wget-implementation-$(UNAME)-stamp
[ -x "$$(which wget)" ] || { echo "wget not found." ; false ; }
touch $@
clean:
-[ -n "$(REALLY_CLEAN)" ] && rm -rf *.bz2
-[ -n "$(REALLY_CLEAN)" ] && rm -rf *.gz
rm -rf *-stamp
.PHONY: clean populate


@@ -1 +0,0 @@
*


@@ -1,22 +0,0 @@
# Copyright 2013 Prometheus Team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
.SUFFIXES:
include ../../Makefile.INCLUDE
all:
clean:
rm -rf *
git checkout .


@@ -1 +0,0 @@
*


@@ -1,22 +0,0 @@
# Copyright 2013 Prometheus Team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
.SUFFIXES:
include ../../Makefile.INCLUDE
all:
clean:
rm -rf *
git checkout .


@@ -1 +0,0 @@
*


@@ -1,29 +0,0 @@
#!/usr/bin/env bash
# If either of the two tests below fail, you may need to install GNU coreutils
# in your environment.
if [ ! -x "$(which readlink)" ]; then
echo "readlink tool cannot be found." > /dev/stderr
exit 1
fi
if [ ! -x "$(which dirname)" ]; then
echo "dirname tool cannot be found." > /dev/stderr
exit 1
fi
readonly binary="${0}"
readonly binary_path="$(readlink -f ${binary})"
readonly binary_directory="$(dirname ${binary_path})"
readonly platform=$(uname | tr '[:upper:]' '[:lower:]')
export LD_LIBRARY_PATH="${binary_directory}/lib:${LD_LIBRARY_PATH}"
if [[ "${platform}" == "darwin" ]]; then
export DYLD_LIBRARY_PATH="${binary_directory}/lib:${DYLD_LIBRARY_PATH}"
fi
exec "${binary_directory}/prometheus" "${@}"


@@ -22,14 +22,12 @@ $(GOCC): $(BUILD_PATH)/cache/$(GOPKG) $(FULL_GOPATH)
touch $@
advice:
$(GO) tool vet .
$(GO) vet ./...
binary: build
build: config dependencies model preparation tools web
build: config dependencies tools web
$(GO) build -o prometheus $(BUILDFLAGS) .
cp prometheus $(BUILD_PATH)/package/prometheus
rsync -av --delete $(BUILD_PATH)/root/lib/ $(BUILD_PATH)/package/lib/
docker: build
docker build -t prometheus:$(REV) .
@@ -37,7 +35,7 @@ docker: build
tarball: $(ARCHIVE)
$(ARCHIVE): build
tar -C $(BUILD_PATH)/package -czf $(ARCHIVE) .
tar -czf $(ARCHIVE) prometheus
release: REMOTE ?= $(error "can't upload, REMOTE not set")
release: REMOTE_DIR ?= $(error "can't upload, REMOTE_DIR not set")
@@ -49,7 +47,7 @@ tag:
git push --tags
$(BUILD_PATH)/cache/$(GOPKG):
curl -o $@ -L $(GOURL)/$(GOPKG)
$(CURL) -o $@ -L $(GOURL)/$(GOPKG)
benchmark: test
$(GO) test $(GO_TEST_FLAGS) -test.bench='Benchmark' ./...
@@ -59,15 +57,15 @@ clean:
$(MAKE) -C tools clean
$(MAKE) -C web clean
rm -rf $(TEST_ARTIFACTS)
-rm prometheus.tar.gz
-find . -type f -iname '*~' -exec rm '{}' ';'
-find . -type f -iname '*#' -exec rm '{}' ';'
-find . -type f -iname '.#*' -exec rm '{}' ';'
-rm $(ARCHIVE)
-find . -type f -name '*~' -exec rm '{}' ';'
-find . -type f -name '*#' -exec rm '{}' ';'
-find . -type f -name '.#*' -exec rm '{}' ';'
config: dependencies preparation
config: dependencies
$(MAKE) -C config
dependencies: preparation
dependencies: $(GOCC) $(FULL_GOPATH)
$(GO) get -d
documentation: search_index
@@ -76,14 +74,8 @@ documentation: search_index
format:
find . -iname '*.go' | egrep -v "^\./\.build|./generated|\.(l|y)\.go" | xargs -n1 $(GOFMT) -w -s=true
model: dependencies preparation
$(MAKE) -C model
preparation: $(GOCC) $(FULL_GOPATH)
$(MAKE) -C $(BUILD_PATH)
race_condition_binary: build
CGO_CFLAGS="-I$(BUILD_PATH)/root/include" CGO_LDFLAGS="-L$(BUILD_PATH)/root/lib" $(GO) build -race -o prometheus.race $(BUILDFLAGS) .
$(GO) build -race -o prometheus.race $(BUILDFLAGS) .
race_condition_run: race_condition_binary
./prometheus.race $(ARGUMENTS)
@@ -94,7 +86,7 @@ run: binary
search_index:
godoc -index -write_index -index_files='search_index'
server: config dependencies model preparation
server: config dependencies
$(MAKE) -C server
# $(FULL_GOPATH) is responsible for ensuring that the builder has not done anything
@@ -103,16 +95,13 @@ $(FULL_GOPATH):
-[ -d "$(FULL_GOPATH)" ] || { mkdir -vp $(FULL_GOPATH_BASE) ; ln -s "$(PWD)" "$(FULL_GOPATH)" ; }
[ -d "$(FULL_GOPATH)" ]
test: config dependencies model preparation tools web
test: config dependencies tools web
$(GO) test $(GO_TEST_FLAGS) ./...
tools: dependencies preparation
tools: dependencies
$(MAKE) -C tools
update:
$(GO) get -d
web: config dependencies model preparation
web: config dependencies
$(MAKE) -C web
.PHONY: advice binary build clean config dependencies documentation format model preparation race_condition_binary race_condition_run release run search_index tag tarball test tools update
.PHONY: advice binary build clean config dependencies documentation format race_condition_binary race_condition_run release run search_index tag tarball test tools


@@ -15,15 +15,7 @@
.SUFFIXES:
# Set this to "false" to provide verbose builds of third-party components,
# namely C and C++ dependencies.
export SILENCE_THIRD_PARTY_BUILDS := true
ifeq ($(SILENCE_THIRD_PARTY_BUILDS), true)
export THIRD_PARTY_BUILD_OUTPUT := >/dev/null 2>&1
else
export THIRD_PARTY_BUILD_OUTPUT :=
endif
VERSION=0.8.0
OS=$(shell uname)
ARCH=$(shell uname -m)
@@ -34,7 +26,7 @@ MAC_OS_X_VERSION ?= 10.8
BUILD_PATH = $(PWD)/.build
GO_VERSION := 1.3
GO_VERSION := 1.3.3
GOOS = $(subst Darwin,darwin,$(subst Linux,linux,$(OS)))
ifeq ($(GOOS),darwin)
@@ -54,42 +46,18 @@ GOENV = TMPDIR=$(TMPDIR) GOROOT=$(GOROOT) GOPATH=$(GOPATH)
GO = $(GOENV) $(GOCC)
GOFMT = $(GOROOT)/bin/gofmt
LEVELDB_VERSION := 1.14.0
PROTOCOL_BUFFERS_VERSION := 2.5.0
SNAPPY_VERSION := 1.1.0
UNAME := $(shell uname)
FULL_GOPATH := $(GOPATH)/src/github.com/prometheus/prometheus
FULL_GOPATH_BASE := $(GOPATH)/src/github.com/prometheus
export PREFIX=$(BUILD_PATH)/root
export LOCAL_BINARIES=$(PREFIX)/bin
export PATH := $(GOPATH)/bin:$(PATH)
export PATH := $(LOCAL_BINARIES):$(GOPATH)/bin:$(PATH)
export LD_LIBRARY_PATH := $(PREFIX)/lib:$(LD_LIBRARY_PATH)
export CFLAGS := $(CFLAGS) -I$(PREFIX)/include -O3
export CXXFLAGS := $(CXXFLAGS) -I$(PREFIX)/include -O3
export CPPFLAGS := $(CPPFLAGS) -I$(PREFIX)/include -O3
export LDFLAGS := $(LDFLAGS) -L$(PREFIX)/lib
export PKG_CONFIG_PATH := $(PREFIX)/lib/pkgconfig:$(PKG_CONFIG_PATH)
export CGO_CFLAGS = $(CFLAGS)
export CGO_LDFLAGS = $(LDFLAGS)
export GO_TEST_FLAGS ?= "-v"
export GO_TEST_FLAGS ?= "-v -short"
GO_GET := $(GO) get -u -v -x
APT_GET_INSTALL := sudo apt-get install -y
BREW_INSTALL := brew install
# By default, wget sets the creation time to match the server's, which throws
# off Make. :-(
#
# Set WGET_OPTIONS to include ``--no-use-server-timestamps`` to alleviate this.
WGET := wget $(WGET_OPTIONS) -c
VERSION := $(shell cat VERSION)
REV := $(shell git rev-parse --short HEAD)
BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
HOSTNAME := $(shell hostname -f)
@@ -100,11 +68,8 @@ BUILDFLAGS := -ldflags \
-X main.buildBranch $(BRANCH)\
-X main.buildUser $(USER)@$(HOSTNAME)\
-X main.buildDate $(BUILD_DATE)\
-X main.goVersion $(GO_VERSION)\
-X main.leveldbVersion $(LEVELDB_VERSION)\
-X main.protobufVersion $(PROTOCOL_BUFFERS_VERSION)\
-X main.snappyVersion $(SNAPPY_VERSION)"
PROTOC := $(LOCAL_BINARIES)/protoc
-X main.goVersion $(GO_VERSION)"
PROTOC := protoc
CURL := curl
ARCHIVE := prometheus-$(VERSION).$(GOOS)-$(GOARCH).tar.gz

README.md

@@ -13,88 +13,67 @@ The system is designed to collect telemetry from named targets on given
intervals, evaluate rule expressions, display the results, and trigger an
action if some condition is observed to be true.
## Prerequisites
If you read below in the _Getting Started_ section, the build infrastructure
will take care of the following things for you in most cases:
TODO: The above description is somewhat esoteric. Rephrase it into
something that tells normal people how they will usually benefit from
using Prometheus.
1. Go 1.1.
2. LevelDB: [https://code.google.com/p/leveldb/](https://code.google.com/p/leveldb/).
3. Protocol Buffers Compiler: [http://code.google.com/p/protobuf/](http://code.google.com/p/protobuf/).
4. goprotobuf: the code generator and runtime library: [http://code.google.com/p/goprotobuf/](http://code.google.com/p/goprotobuf/).
5. Levigo, a Go-wrapper around LevelDB's C library: [https://github.com/jmhodges/levigo](https://github.com/jmhodges/levigo).
6. GoRest, a RESTful style web-services framework: [http://code.google.com/p/gorest/](http://code.google.com/p/gorest/).
7. Prometheus Client, Prometheus in Prometheus [https://github.com/prometheus/client_golang](https://github.com/prometheus/client_golang).
8. Snappy, a compression library for LevelDB and Levigo [http://code.google.com/p/snappy/](http://code.google.com/p/snappy/).
## Install
## Getting Started
There are various ways of installing Prometheus.
For basic help on how to get started:
### Precompiled packages
* The source code is periodically indexed: [Prometheus Core](http://godoc.org/github.com/prometheus/prometheus).
* For UNIX-like environment users, please consult the Travis CI configuration in _.travis.yml_ and _Makefile_.
* All of the core developers are accessible via the [Prometheus Developers Mailing List](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
We plan to provide precompiled binaries for various platforms and even
packages for common Linux distributions soon. Once those are offered,
they will be the recommended way of installing Prometheus.
### General
### Use `make`
For first time users, simply run the following:
In most circumstances, the following should work:
$ make
$ ARGUMENTS="-configFile=documentation/examples/prometheus.conf" make run
$ ARGUMENTS="-config.file=documentation/examples/prometheus.conf" make run
``${ARGUMENTS}`` is passed verbatim into the makefile and thusly Prometheus as
``$(ARGUMENTS)``. This is useful for quick one-off invocations and smoke
testing.
``${ARGUMENTS}`` is passed verbatim to the commandline starting the Prometheus binary.
This is useful for quick one-off invocations and smoke testing.
If you run into problems, try the following:
The above requires a number of common tools to be installed, namely
`curl`, `git`, `gzip`, `hg` (Mercurial CLI), `sed`, `xxd`. Should you
need to change any of the protocol buffer definition files
(`*.proto`), you also need the protocol buffer compiler
[`protoc`](http://code.google.com/p/protobuf/),
v2.5.0 or higher, in your `$PATH`.
$ SILENCE_THIRD_PARTY_BUILDS=false make
Everything else will be downloaded and installed into a staging
environment in the `.build` sub-directory. That includes a Go
development environment of the appropriate version.
Upon having a satisfactory build, it's possible to create an artifact for
end-user distribution:
The `Makefile` offers a number of useful targets. Some examples:
$ make package
$ find build/package
* `make test` runs tests.
* `make tarball` creates a tar ball with the binary for distribution.
* `make race_condition_run` compiles and runs a binary with the race detector enabled.
``build/package`` will be sufficient for whatever archiving mechanism you
choose. The important thing to note is that Go presently does not
statically link against C dependency libraries, so including the ``lib``
directory is paramount. Providing ``LD_LIBRARY_PATH`` or
``DYLD_LIBRARY_PATH`` in a scaffolding shell script is advised.
### Use your own Go development environment
Using your own Go development environment with the usual tooling is
possible, too, but you have to take care of various generated files
(usually by running `make` in the respective sub-directory):
### Problems
If at any point you run into an error with the ``make`` build system in terms of
its not properly scaffolding things on a given environment, please file a bug or
open a pull request with your changes if you can fix it yourself.
* Compiling the protocol buffer definitions in `config` (only if you have changed them).
* Generating the parser and lexer code in `rules` (only if you have changed `parser.y` or `lexer.l`).
* The `files.go` blob in `web/blob`, which embeds the static web content into the binary.
Please note that we're explicitly shooting for stable runtime environments and
not the latest whiz-bang releases; thusly, we ask you to provide ample
architecture and release identification remarks for us.
Furthermore, the build info (see `build_info.go`) will not be
populated if you simply run `go build`. You have to pass in command
line flags as defined in `Makefile.INCLUDE` (see `${BUILDFLAGS}`) to
do that.
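
To make that concrete, here is a minimal sketch (not part of the repository;
the variable name is illustrative) of how Go 1.3's `-ldflags "-X name value"`
mechanism injects build info at link time:

    // sketch.go: buildVersion is overridden by the -X linker flag, e.g.
    //   go build -ldflags "-X main.buildVersion 0.8.0" sketch.go
    package main

    import "fmt"

    var buildVersion = "unknown"

    func main() {
        fmt.Println("version:", buildVersion)
    }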
## Testing
## More information
$ make test
## Packaging
$ make package
### Race Detector
Go 1.1 includes a [race detector](http://tip.golang.org/doc/articles/race_detector.html)
which can be enabled at build time. Here's how to use it with Prometheus
(assumes that you've already run a successful build).
To run the tests with race detection:
$ GORACE="log_path=/tmp/foo" go test -race ./...
To run the server with race detection:
$ go build -race .
$ GORACE="log_path=/tmp/foo" ./prometheus
[![Build Status](https://travis-ci.org/prometheus/prometheus.png)](https://travis-ci.org/prometheus/prometheus)
* The source code is periodically indexed: [Prometheus Core](http://godoc.org/github.com/prometheus/prometheus).
* You will find a Travis CI configuration in `.travis.yml`.
* All of the core developers are accessible via the [Prometheus Developers Mailing List](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
## Contributing
@@ -102,4 +81,4 @@ Refer to [CONTRIBUTING.md](CONTRIBUTING.md)
## License
Apache License 2.0
Apache License 2.0, see [LICENSE](LICENSE).


@@ -1 +0,0 @@
0.8.0


@@ -19,29 +19,23 @@ import (
// Build information. Populated by Makefile.
var (
buildVersion string
buildRevision string
buildBranch string
buildUser string
buildDate string
goVersion string
leveldbVersion string
protobufVersion string
snappyVersion string
buildVersion string
buildRevision string
buildBranch string
buildUser string
buildDate string
goVersion string
)
// BuildInfo encapsulates compile-time metadata about Prometheus made available
// via go tool ld such that this can be reported on-demand.
var BuildInfo = map[string]string{
"version": buildVersion,
"revision": buildRevision,
"branch": buildBranch,
"user": buildUser,
"date": buildDate,
"go_version": goVersion,
"leveldb_version": leveldbVersion,
"protobuf_version": protobufVersion,
"snappy_version": snappyVersion,
"version": buildVersion,
"revision": buildRevision,
"branch": buildBranch,
"user": buildUser,
"date": buildDate,
"go_version": goVersion,
}
var versionInfoTmpl = template.Must(template.New("version").Parse(
@@ -49,7 +43,4 @@ var versionInfoTmpl = template.Must(template.New("version").Parse(
build user: {{.user}}
build date: {{.date}}
go version: {{.go_version}}
leveldb version: {{.leveldb_version}}
protobuf version: {{.protobuf_version}}
snappy version: {{.snappy_version}}
`))


@@ -1,42 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package indexable
import (
"encoding/binary"
clientmodel "github.com/prometheus/client_golang/model"
)
// EncodeTimeInto writes the provided time into the specified buffer subject
// to the LevelDB big endian key sort order requirement.
func EncodeTimeInto(dst []byte, t clientmodel.Timestamp) {
binary.BigEndian.PutUint64(dst, uint64(t.Unix()))
}
// EncodeTime converts the provided time into a byte buffer subject to the
// LevelDB big endian key sort order requirement.
func EncodeTime(t clientmodel.Timestamp) []byte {
buffer := make([]byte, 8)
EncodeTimeInto(buffer, t)
return buffer
}
// DecodeTime deserializes a big endian byte array into a Unix time in UTC,
// omitting granularity precision less than a second.
func DecodeTime(src []byte) clientmodel.Timestamp {
return clientmodel.TimestampFromUnix(int64(binary.BigEndian.Uint64(src)))
}
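
For context, a hedged round-trip sketch of how these helpers were used before
this commit removed the package (the import path is assumed from the old
repository layout):

package main

import (
	"fmt"

	clientmodel "github.com/prometheus/client_golang/model"

	// Assumed path of the (now deleted) package shown above.
	"github.com/prometheus/prometheus/coding/indexable"
)

func main() {
	in := clientmodel.TimestampFromUnix(1417000000)
	key := indexable.EncodeTime(in) // 8 bytes, big endian: byte order matches time order
	out := indexable.DecodeTime(key)
	fmt.Println(in.Equal(out)) // true
}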


@@ -1,39 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package indexable
import (
"math/rand"
"testing"
"testing/quick"
clientmodel "github.com/prometheus/client_golang/model"
)
func TestTimeEndToEnd(t *testing.T) {
tester := func(x int) bool {
random := rand.New(rand.NewSource(int64(x)))
buffer := make([]byte, 8)
incoming := clientmodel.TimestampFromUnix(random.Int63())
EncodeTimeInto(buffer, incoming)
outgoing := DecodeTime(buffer)
return incoming.Equal(outgoing) && incoming.Unix() == outgoing.Unix()
}
if err := quick.Check(tester, nil); err != nil {
t.Error(err)
}
}


@@ -17,12 +17,6 @@ SUFFIXES:
include ../Makefile.INCLUDE
# In order to build the generated targets in this directory, run the
# following:
#
# make -C build goprotobuf-protoc-gen-go-stamp
generated/config.pb.go: config.proto
$(MAKE) -C ../.build goprotobuf-protoc-gen-go-stamp
$(GO_GET) code.google.com/p/goprotobuf/protoc-gen-go
$(PROTOC) --proto_path=$(PREFIX)/include:. --go_out=generated/ config.proto

main.go

@@ -26,12 +26,13 @@ import (
registry "github.com/prometheus/client_golang/prometheus"
clientmodel "github.com/prometheus/client_golang/model"
registry "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/notification"
"github.com/prometheus/prometheus/retrieval"
"github.com/prometheus/prometheus/rules/manager"
"github.com/prometheus/prometheus/storage/metric/tiered"
"github.com/prometheus/prometheus/storage/local"
"github.com/prometheus/prometheus/storage/remote"
"github.com/prometheus/prometheus/storage/remote/opentsdb"
"github.com/prometheus/prometheus/web"
@@ -42,208 +43,99 @@ const deletionBatchSize = 100
// Commandline flags.
var (
configFile = flag.String("configFile", "prometheus.conf", "Prometheus configuration file name.")
metricsStoragePath = flag.String("metricsStoragePath", "/tmp/metrics", "Base path for metrics storage.")
configFile = flag.String("config.file", "prometheus.conf", "Prometheus configuration file name.")
alertmanagerUrl = flag.String("alertmanager.url", "", "The URL of the alert manager to send notifications to.")
alertmanagerURL = flag.String("alertmanager.url", "", "The URL of the alert manager to send notifications to.")
notificationQueueCapacity = flag.Int("alertmanager.notification-queue-capacity", 100, "The capacity of the queue for pending alert manager notifications.")
metricsStoragePath = flag.String("storage.local.path", "/tmp/metrics", "Base path for metrics storage.")
remoteTSDBUrl = flag.String("storage.remote.url", "", "The URL of the OpenTSDB instance to send samples to.")
remoteTSDBTimeout = flag.Duration("storage.remote.timeout", 30*time.Second, "The timeout to use when sending samples to OpenTSDB.")
samplesQueueCapacity = flag.Int("storage.queue.samplesCapacity", 4096, "The size of the unwritten samples queue.")
diskAppendQueueCapacity = flag.Int("storage.queue.diskAppendCapacity", 1000000, "The size of the queue for items that are pending writing to disk.")
memoryAppendQueueCapacity = flag.Int("storage.queue.memoryAppendCapacity", 10000, "The size of the queue for items that are pending writing to memory.")
samplesQueueCapacity = flag.Int("storage.incoming-samples-queue-capacity", 4096, "The capacity of the queue of samples to be stored.")
compactInterval = flag.Duration("compact.interval", 3*time.Hour, "The amount of time between compactions.")
compactGroupSize = flag.Int("compact.groupSize", 500, "The minimum group size for compacted samples.")
compactAgeInclusiveness = flag.Duration("compact.ageInclusiveness", 5*time.Minute, "The age beyond which samples should be compacted.")
numMemoryChunks = flag.Int("storage.local.memory-chunks", 1024*1024, "How many chunks to keep in memory. While the size of a chunk is 1kiB, the total memory usage will be significantly higher than this value * 1kiB. Furthermore, for various reasons, more chunks might have to be kept in memory temporarily.")
deleteInterval = flag.Duration("delete.interval", 11*time.Hour, "The amount of time between deletion of old values.")
storageRetentionPeriod = flag.Duration("storage.local.retention", 15*24*time.Hour, "How long to retain samples in the local storage.")
deleteAge = flag.Duration("delete.ageMaximum", 15*24*time.Hour, "The relative maximum age for values before they are deleted.")
checkpointInterval = flag.Duration("storage.local.checkpoint-interval", 5*time.Minute, "The period at which the in-memory index of time series is checkpointed.")
arenaFlushInterval = flag.Duration("arena.flushInterval", 15*time.Minute, "The period at which the in-memory arena is flushed to disk.")
arenaTTL = flag.Duration("arena.ttl", 10*time.Minute, "The relative age of values to purge to disk from memory.")
storageDirty = flag.Bool("storage.local.dirty", false, "If set, the local storage layer will perform crash recovery even if the last shutdown appears to be clean.")
notificationQueueCapacity = flag.Int("alertmanager.notificationQueueCapacity", 100, "The size of the queue for pending alert manager notifications.")
printVersion = flag.Bool("version", false, "Print version information.")
)
printVersion = flag.Bool("version", false, "print version information")
shutdownTimeout = flag.Duration("shutdownGracePeriod", 0*time.Second, "The amount of time Prometheus gives background services to finish running when shutdown is requested.")
// Instrumentation.
var (
samplesQueueCapDesc = registry.NewDesc(
"prometheus_samples_queue_capacity",
"Capacity of the queue for unwritten samples.",
nil, nil,
)
samplesQueueLenDesc = registry.NewDesc(
"prometheus_samples_queue_length",
"Current number of items in the queue for unwritten samples. Each item comprises all samples exposed by one target as one metric family (i.e. metrics of the same name).",
nil, nil,
)
)
type prometheus struct {
compactionTimer *time.Ticker
deletionTimer *time.Ticker
curationSema chan struct{}
stopBackgroundOperations chan struct{}
unwrittenSamples chan *extraction.Result
ruleManager manager.RuleManager
targetManager retrieval.TargetManager
notifications chan notification.NotificationReqs
storage *tiered.TieredStorage
remoteTSDBQueue *remote.TSDBQueueManager
ruleManager manager.RuleManager
targetManager retrieval.TargetManager
notificationHandler *notification.NotificationHandler
storage local.Storage
remoteTSDBQueue *remote.TSDBQueueManager
curationState tiered.CurationStateUpdater
webService *web.WebService
closeOnce sync.Once
}
func (p *prometheus) interruptHandler() {
notifier := make(chan os.Signal)
signal.Notify(notifier, os.Interrupt, syscall.SIGTERM)
<-notifier
glog.Warning("Received SIGINT/SIGTERM; Exiting gracefully...")
p.Close()
os.Exit(0)
}
func (p *prometheus) compact(olderThan time.Duration, groupSize int) error {
select {
case s, ok := <-p.curationSema:
if !ok {
glog.Warning("Prometheus is shutting down; no more curation runs are allowed.")
return nil
}
defer func() {
p.curationSema <- s
}()
default:
glog.Warningf("Deferred compaction for %s and %s due to existing operation.", olderThan, groupSize)
return nil
}
processor := tiered.NewCompactionProcessor(&tiered.CompactionProcessorOptions{
MaximumMutationPoolBatch: groupSize * 3,
MinimumGroupSize: groupSize,
})
defer processor.Close()
curator := tiered.NewCurator(&tiered.CuratorOptions{
Stop: p.stopBackgroundOperations,
ViewQueue: p.storage.ViewQueue,
})
defer curator.Close()
return curator.Run(olderThan, clientmodel.Now(), processor, p.storage.DiskStorage.CurationRemarks, p.storage.DiskStorage.MetricSamples, p.storage.DiskStorage.MetricHighWatermarks, p.curationState)
}
func (p *prometheus) delete(olderThan time.Duration, batchSize int) error {
select {
case s, ok := <-p.curationSema:
if !ok {
glog.Warning("Prometheus is shutting down; no more curation runs are allowed.")
return nil
}
defer func() {
p.curationSema <- s
}()
default:
glog.Warningf("Deferred deletion for %s due to existing operation.", olderThan)
return nil
}
processor := tiered.NewDeletionProcessor(&tiered.DeletionProcessorOptions{
MaximumMutationPoolBatch: batchSize,
})
defer processor.Close()
curator := tiered.NewCurator(&tiered.CuratorOptions{
Stop: p.stopBackgroundOperations,
ViewQueue: p.storage.ViewQueue,
})
defer curator.Close()
return curator.Run(olderThan, clientmodel.Now(), processor, p.storage.DiskStorage.CurationRemarks, p.storage.DiskStorage.MetricSamples, p.storage.DiskStorage.MetricHighWatermarks, p.curationState)
}
func (p *prometheus) Close() {
p.closeOnce.Do(p.close)
}
func (p *prometheus) close() {
// The "Done" remarks are a misnomer for some subsystems due to lack of
// blocking and synchronization.
glog.Info("Shutdown has been requested; subsytems are closing:")
p.targetManager.Stop()
glog.Info("Remote Target Manager: Done")
p.ruleManager.Stop()
glog.Info("Rule Executor: Done")
// Stop any currently active curation (deletion or compaction).
close(p.stopBackgroundOperations)
glog.Info("Current Curation Workers: Requested")
// Disallow further curation work.
close(p.curationSema)
// Stop curation timers.
if p.compactionTimer != nil {
p.compactionTimer.Stop()
}
if p.deletionTimer != nil {
p.deletionTimer.Stop()
}
glog.Info("Future Curation Workers: Done")
glog.Infof("Waiting %s for background systems to exit and flush before finalizing (DO NOT INTERRUPT THE PROCESS) ...", *shutdownTimeout)
// Wart: We should have a concrete form of synchronization for this, not a
// hokey sleep statement.
time.Sleep(*shutdownTimeout)
close(p.unwrittenSamples)
p.storage.Close()
glog.Info("Local Storage: Done")
if p.remoteTSDBQueue != nil {
p.remoteTSDBQueue.Close()
glog.Info("Remote Storage: Done")
}
close(p.notifications)
glog.Info("Sundry Queues: Done")
glog.Info("See you next time!")
}
func main() {
// TODO(all): Future additions to main should be, where applicable, lumped
// into the prometheus struct above---at least where the scoping of the entire
// server is concerned.
flag.Parse()
versionInfoTmpl.Execute(os.Stdout, BuildInfo)
if *printVersion {
os.Exit(0)
}
// NewPrometheus creates a new prometheus object based on flag values.
// Call Serve() to start serving and Close() for clean shutdown.
func NewPrometheus() *prometheus {
conf, err := config.LoadFromFile(*configFile)
if err != nil {
glog.Fatalf("Error loading configuration from %s: %v", *configFile, err)
}
ts, err := tiered.NewTieredStorage(uint(*diskAppendQueueCapacity), 100, *arenaFlushInterval, *arenaTTL, *metricsStoragePath)
if err != nil {
glog.Fatal("Error opening storage: ", err)
unwrittenSamples := make(chan *extraction.Result, *samplesQueueCapacity)
ingester := &retrieval.MergeLabelsIngester{
Labels: conf.GlobalLabels(),
CollisionPrefix: clientmodel.ExporterLabelPrefix,
Ingester: retrieval.ChannelIngester(unwrittenSamples),
}
targetManager := retrieval.NewTargetManager(ingester)
targetManager.AddTargetsFromConfig(conf)
notificationHandler := notification.NewNotificationHandler(*alertmanagerURL, *notificationQueueCapacity)
o := &local.MemorySeriesStorageOptions{
MemoryChunks: *numMemoryChunks,
PersistenceStoragePath: *metricsStoragePath,
PersistenceRetentionPeriod: *storageRetentionPeriod,
CheckpointInterval: *checkpointInterval,
Dirty: *storageDirty,
}
memStorage, err := local.NewMemorySeriesStorage(o)
if err != nil {
glog.Fatal("Error opening memory series storage: ", err)
}
ruleManager := manager.NewRuleManager(&manager.RuleManagerOptions{
Results: unwrittenSamples,
NotificationHandler: notificationHandler,
EvaluationInterval: conf.EvaluationInterval(),
Storage: memStorage,
PrometheusUrl: web.MustBuildServerUrl(),
})
if err := ruleManager.AddRulesFromConfig(conf); err != nil {
glog.Fatal("Error loading rule files: ", err)
}
registry.MustRegister(ts)
var remoteTSDBQueue *remote.TSDBQueueManager
if *remoteTSDBUrl == "" {
@@ -251,50 +143,12 @@ func main() {
} else {
openTSDB := opentsdb.NewClient(*remoteTSDBUrl, *remoteTSDBTimeout)
remoteTSDBQueue = remote.NewTSDBQueueManager(openTSDB, 512)
registry.MustRegister(remoteTSDBQueue)
go remoteTSDBQueue.Run()
}
unwrittenSamples := make(chan *extraction.Result, *samplesQueueCapacity)
ingester := &retrieval.MergeLabelsIngester{
Labels: conf.GlobalLabels(),
CollisionPrefix: clientmodel.ExporterLabelPrefix,
Ingester: retrieval.ChannelIngester(unwrittenSamples),
}
compactionTimer := time.NewTicker(*compactInterval)
deletionTimer := time.NewTicker(*deleteInterval)
// Queue depth will need to be exposed
targetManager := retrieval.NewTargetManager(ingester)
targetManager.AddTargetsFromConfig(conf)
notifications := make(chan notification.NotificationReqs, *notificationQueueCapacity)
// Queue depth will need to be exposed
ruleManager := manager.NewRuleManager(&manager.RuleManagerOptions{
Results: unwrittenSamples,
Notifications: notifications,
EvaluationInterval: conf.EvaluationInterval(),
Storage: ts,
PrometheusUrl: web.MustBuildServerUrl(),
})
if err := ruleManager.AddRulesFromConfig(conf); err != nil {
glog.Fatal("Error loading rule files: ", err)
}
go ruleManager.Run()
notificationHandler := notification.NewNotificationHandler(*alertmanagerUrl, notifications)
registry.MustRegister(notificationHandler)
go notificationHandler.Run()
flags := map[string]string{}
flag.VisitAll(func(f *flag.Flag) {
flags[f.Name] = f.Value.String()
})
prometheusStatus := &web.PrometheusStatusHandler{
BuildInfo: BuildInfo,
Config: conf.String(),
@@ -309,96 +163,144 @@ }
}
consolesHandler := &web.ConsolesHandler{
Storage: ts,
}
databasesHandler := &web.DatabasesHandler{
Provider: ts.DiskStorage,
RefreshInterval: 5 * time.Minute,
Storage: memStorage,
}
metricsService := &api.MetricsService{
Config: &conf,
TargetManager: targetManager,
Storage: ts,
Storage: memStorage,
}
prometheus := &prometheus{
compactionTimer: compactionTimer,
deletionTimer: deletionTimer,
curationState: prometheusStatus,
curationSema: make(chan struct{}, 1),
unwrittenSamples: unwrittenSamples,
stopBackgroundOperations: make(chan struct{}),
ruleManager: ruleManager,
targetManager: targetManager,
notifications: notifications,
storage: ts,
remoteTSDBQueue: remoteTSDBQueue,
}
defer prometheus.Close()
webService := &web.WebService{
StatusHandler: prometheusStatus,
MetricsHandler: metricsService,
DatabasesHandler: databasesHandler,
ConsolesHandler: consolesHandler,
AlertsHandler: alertsHandler,
QuitDelegate: prometheus.Close,
StatusHandler: prometheusStatus,
MetricsHandler: metricsService,
ConsolesHandler: consolesHandler,
AlertsHandler: alertsHandler,
}
prometheus.curationSema <- struct{}{}
p := &prometheus{
unwrittenSamples: unwrittenSamples,
storageStarted := make(chan bool)
go ts.Serve(storageStarted)
<-storageStarted
ruleManager: ruleManager,
targetManager: targetManager,
notificationHandler: notificationHandler,
storage: memStorage,
remoteTSDBQueue: remoteTSDBQueue,
go prometheus.interruptHandler()
webService: webService,
}
webService.QuitDelegate = p.Close
return p
}
// Serve starts the Prometheus server. It returns after the server has been shut
// down. The method installs an interrupt handler, allowing a shutdown to be
// triggered by sending SIGTERM to the process.
func (p *prometheus) Serve() {
if p.remoteTSDBQueue != nil {
go p.remoteTSDBQueue.Run()
}
go p.ruleManager.Run()
go p.notificationHandler.Run()
go p.interruptHandler()
p.storage.Start()
go func() {
for _ = range prometheus.compactionTimer.C {
glog.Info("Starting compaction...")
err := prometheus.compact(*compactAgeInclusiveness, *compactGroupSize)
if err != nil {
glog.Error("could not compact: ", err)
}
glog.Info("Done")
}
}()
go func() {
for _ = range prometheus.deletionTimer.C {
glog.Info("Starting deletion of stale values...")
err := prometheus.delete(*deleteAge, deletionBatchSize)
if err != nil {
glog.Error("could not delete: ", err)
}
glog.Info("Done")
}
}()
go func() {
err := webService.ServeForever()
err := p.webService.ServeForever()
if err != nil {
glog.Fatal(err)
}
}()
// TODO(all): Migrate this into prometheus.serve().
for block := range unwrittenSamples {
for block := range p.unwrittenSamples {
if block.Err == nil && len(block.Samples) > 0 {
ts.AppendSamples(block.Samples)
if remoteTSDBQueue != nil {
remoteTSDBQueue.Queue(block.Samples)
p.storage.AppendSamples(block.Samples)
if p.remoteTSDBQueue != nil {
p.remoteTSDBQueue.Queue(block.Samples)
}
}
}
// The following shut-down operations have to happen after
// unwrittenSamples is drained. So do not move them into close().
if err := p.storage.Stop(); err != nil {
glog.Error("Error stopping local storage: ", err)
}
if p.remoteTSDBQueue != nil {
p.remoteTSDBQueue.Stop()
}
p.notificationHandler.Stop()
glog.Info("See you next time!")
}
// Close cleanly shuts down the Prometheus server.
func (p *prometheus) Close() {
p.closeOnce.Do(p.close)
}
func (p *prometheus) interruptHandler() {
notifier := make(chan os.Signal)
signal.Notify(notifier, os.Interrupt, syscall.SIGTERM)
<-notifier
glog.Warning("Received SIGTERM, exiting gracefully...")
p.Close()
}
func (p *prometheus) close() {
glog.Info("Shutdown has been requested; subsytems are closing:")
p.targetManager.Stop()
p.ruleManager.Stop()
close(p.unwrittenSamples)
// Note: Before closing the remaining subsystems (storage, ...), we have
// to wait until p.unwrittenSamples is actually drained. Therefore,
// remaining shut-downs happen in Serve().
}
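
The comment above pins down the ordering constraint: p.unwrittenSamples must be
drained before storage and the remote queue are stopped. A hedged,
self-contained sketch of the same close-once-then-drain pattern (all names
hypothetical):

package main

import (
	"fmt"
	"sync"
)

type server struct {
	work      chan int
	closeOnce sync.Once
}

// Close may be called from several paths (interrupt handler, defer),
// so sync.Once guards the channel close.
func (s *server) Close() { s.closeOnce.Do(func() { close(s.work) }) }

// Serve drains the queue until Close; only afterwards may downstream
// subsystems be stopped.
func (s *server) Serve() {
	for v := range s.work {
		fmt.Println("processed", v)
	}
	fmt.Println("queue drained; stopping downstream subsystems")
}

func main() {
	s := &server{work: make(chan int, 4)}
	s.work <- 1
	s.work <- 2
	s.Close()
	s.Serve()
}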
// Describe implements registry.Collector.
func (p *prometheus) Describe(ch chan<- *registry.Desc) {
ch <- samplesQueueCapDesc
ch <- samplesQueueLenDesc
p.notificationHandler.Describe(ch)
p.storage.Describe(ch)
if p.remoteTSDBQueue != nil {
p.remoteTSDBQueue.Describe(ch)
}
}
// Collect implements registry.Collector.
func (p *prometheus) Collect(ch chan<- registry.Metric) {
ch <- registry.MustNewConstMetric(
samplesQueueCapDesc,
registry.GaugeValue,
float64(cap(p.unwrittenSamples)),
)
ch <- registry.MustNewConstMetric(
samplesQueueLenDesc,
registry.GaugeValue,
float64(len(p.unwrittenSamples)),
)
p.notificationHandler.Collect(ch)
p.storage.Collect(ch)
if p.remoteTSDBQueue != nil {
p.remoteTSDBQueue.Collect(ch)
}
}
func main() {
flag.Parse()
versionInfoTmpl.Execute(os.Stdout, BuildInfo)
if *printVersion {
os.Exit(0)
}
p := NewPrometheus()
registry.MustRegister(p)
p.Serve()
}
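
For reference, the Describe/Collect pair above implements client_golang's
Collector interface with const metrics. A hedged minimal sketch of the same
pattern (metric names hypothetical; the registry calls are the ones used above):

package main

import (
	registry "github.com/prometheus/client_golang/prometheus"
)

// queueCollector exposes the length and capacity of a channel as gauges,
// mirroring the samples-queue metrics above.
type queueCollector struct {
	queue   chan int
	lenDesc *registry.Desc
	capDesc *registry.Desc
}

func (c *queueCollector) Describe(ch chan<- *registry.Desc) {
	ch <- c.lenDesc
	ch <- c.capDesc
}

func (c *queueCollector) Collect(ch chan<- registry.Metric) {
	ch <- registry.MustNewConstMetric(c.lenDesc, registry.GaugeValue, float64(len(c.queue)))
	ch <- registry.MustNewConstMetric(c.capDesc, registry.GaugeValue, float64(cap(c.queue)))
}

func main() {
	c := &queueCollector{
		queue:   make(chan int, 64),
		lenDesc: registry.NewDesc("example_queue_length", "Current number of queued items.", nil, nil),
		capDesc: registry.NewDesc("example_queue_capacity", "Capacity of the queue.", nil, nil),
	}
	registry.MustRegister(c)
}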


@@ -1,31 +0,0 @@
# Copyright 2013 Prometheus Team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
all: generated/data.pb.go generated/descriptor.blob
.SUFFIXES:
include ../Makefile.INCLUDE
# In order to build the generated targets in this directory, run the
# following:
#
# make -C .build goprotobuf-protoc-gen-go-stamp
generated/data.pb.go: data.proto
$(MAKE) -C ../.build goprotobuf-protoc-gen-go-stamp
$(PROTOC) --proto_path=$(PREFIX)/include:. --include_imports --go_out=generated/ --descriptor_set_out=generated/descriptor.blob data.proto
generated/descriptor.blob: data.proto
$(MAKE) -C ../.build goprotobuf-protoc-gen-go-stamp
$(PROTOC) --proto_path=$(PREFIX)/include:. --include_imports --go_out=generated/ --descriptor_set_out=generated/descriptor.blob data.proto


@@ -1,125 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package io.prometheus;
import "google/protobuf/descriptor.proto";
message LabelPair {
optional string name = 1;
optional string value = 2;
}
message LabelName {
optional string name = 1;
}
message LabelValueCollection {
repeated string member = 1;
}
message Metric {
repeated LabelPair label_pair = 1;
}
message Fingerprint {
optional string signature = 1;
}
message FingerprintCollection {
repeated Fingerprint member = 1;
}
message LabelSet {
repeated LabelPair member = 1;
}
// The default LevelDB comparator sorts not only lexicographically, but also by
// key length (which takes precedence). Thus, no variable-length fields may be
// introduced into the key definition below.
message SampleKey {
optional Fingerprint fingerprint = 1;
optional bytes timestamp = 2;
optional sfixed64 last_timestamp = 3;
optional fixed32 sample_count = 4;
}
message MembershipIndexValue {
}
message MetricHighWatermark {
optional int64 timestamp = 1;
}
// CompactionProcessorDefinition models a curation process across the sample
// corpus that ensures that sparse samples are compacted into larger groups.
message CompactionProcessorDefinition {
// minimum_group_size identifies how minimally samples should be grouped
// together to write a new samples chunk.
optional uint32 minimum_group_size = 1;
}
// CurationKey models the state of curation for a given metric fingerprint and
// its associated samples. The time series database only knows about compaction
// and resampling behaviors that are explicitly defined to it in its runtime
// configuration, meaning it never scans on-disk tables for CurationKey
// policies; rather, it looks up via the CurationKey tuple to find out what the
// effectuation state for a given metric fingerprint is.
//
// For instance, how far along a rule for (Fingerprint A, Samples Older Than
// B, and Curation Processor) has been effectuated on-disk.
message CurationKey {
// fingerprint identifies the fingerprint for the given policy.
optional Fingerprint fingerprint = 1;
// processor_message_type_name identifies the underlying message type that
// was used to encode processor_message_raw.
optional string processor_message_type_name = 2;
// processor_message_raw identifies the serialized ProcessorSignature for this
// operation.
optional bytes processor_message_raw = 3;
// ignore_younger_than represents, in seconds relative to the start of the
// curation cycle, when the curator should stop operating. For instance, if
// the curation cycle starts at time T and the curation remark dictates that
// curation should start processing samples at time S, the curator should
// work from S until ignore_younger_than seconds before T:
//
// PAST NOW FUTURE
//
// S--------------->|----------T
// |---IYT----|
//
// [Curation Resumption Time (S), T - IYT)
optional int64 ignore_younger_than = 4;
// This could be populated by decoding the generated descriptor file into a
// FileDescriptorSet message and extracting the type definition for the given
// message schema that describes processor_message_type_name.
//
// optional google.protobuf.DescriptorProto processor_message_type_descriptor_raw = 5;
}
// CurationValue models the progress for a given CurationKey.
message CurationValue {
// last_completion_timestamp represents the seconds since the epoch UTC at
// which the curator last completed its duty cycle for a given metric
// fingerprint.
optional int64 last_completion_timestamp = 1;
}
// DeletionProcessorDefinition models a curation process across the sample
// corpus that deletes old values.
message DeletionProcessorDefinition {
}
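
To make the ignore_younger_than window in CurationKey above concrete, a hedged
numeric sketch (times are illustrative):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Cycle starts at T, the resumption remark says S, IYT is 4h:
	// the curator works on samples in [S, T-IYT).
	T := time.Date(2014, 11, 25, 12, 0, 0, 0, time.UTC)
	S := T.Add(-24 * time.Hour)
	IYT := 4 * time.Hour
	fmt.Println("process samples from", S, "up to", T.Add(-IYT))
}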


@@ -1,344 +0,0 @@
// Code generated by protoc-gen-go.
// source: data.proto
// DO NOT EDIT!
/*
Package io_prometheus is a generated protocol buffer package.
It is generated from these files:
data.proto
It has these top-level messages:
LabelPair
LabelName
LabelValueCollection
Metric
Fingerprint
FingerprintCollection
LabelSet
SampleKey
MembershipIndexValue
MetricHighWatermark
CompactionProcessorDefinition
CurationKey
CurationValue
DeletionProcessorDefinition
*/
package io_prometheus
import proto "code.google.com/p/goprotobuf/proto"
import json "encoding/json"
import math "math"
// discarding unused import google_protobuf "google/protobuf/descriptor.pb"
// Reference proto, json, and math imports to suppress error if they are not otherwise used.
var _ = proto.Marshal
var _ = &json.SyntaxError{}
var _ = math.Inf
type LabelPair struct {
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *LabelPair) Reset() { *m = LabelPair{} }
func (m *LabelPair) String() string { return proto.CompactTextString(m) }
func (*LabelPair) ProtoMessage() {}
func (m *LabelPair) GetName() string {
if m != nil && m.Name != nil {
return *m.Name
}
return ""
}
func (m *LabelPair) GetValue() string {
if m != nil && m.Value != nil {
return *m.Value
}
return ""
}
type LabelName struct {
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *LabelName) Reset() { *m = LabelName{} }
func (m *LabelName) String() string { return proto.CompactTextString(m) }
func (*LabelName) ProtoMessage() {}
func (m *LabelName) GetName() string {
if m != nil && m.Name != nil {
return *m.Name
}
return ""
}
type LabelValueCollection struct {
Member []string `protobuf:"bytes,1,rep,name=member" json:"member,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *LabelValueCollection) Reset() { *m = LabelValueCollection{} }
func (m *LabelValueCollection) String() string { return proto.CompactTextString(m) }
func (*LabelValueCollection) ProtoMessage() {}
func (m *LabelValueCollection) GetMember() []string {
if m != nil {
return m.Member
}
return nil
}
type Metric struct {
LabelPair []*LabelPair `protobuf:"bytes,1,rep,name=label_pair" json:"label_pair,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Metric) Reset() { *m = Metric{} }
func (m *Metric) String() string { return proto.CompactTextString(m) }
func (*Metric) ProtoMessage() {}
func (m *Metric) GetLabelPair() []*LabelPair {
if m != nil {
return m.LabelPair
}
return nil
}
type Fingerprint struct {
Signature *string `protobuf:"bytes,1,opt,name=signature" json:"signature,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Fingerprint) Reset() { *m = Fingerprint{} }
func (m *Fingerprint) String() string { return proto.CompactTextString(m) }
func (*Fingerprint) ProtoMessage() {}
func (m *Fingerprint) GetSignature() string {
if m != nil && m.Signature != nil {
return *m.Signature
}
return ""
}
type FingerprintCollection struct {
Member []*Fingerprint `protobuf:"bytes,1,rep,name=member" json:"member,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *FingerprintCollection) Reset() { *m = FingerprintCollection{} }
func (m *FingerprintCollection) String() string { return proto.CompactTextString(m) }
func (*FingerprintCollection) ProtoMessage() {}
func (m *FingerprintCollection) GetMember() []*Fingerprint {
if m != nil {
return m.Member
}
return nil
}
type LabelSet struct {
Member []*LabelPair `protobuf:"bytes,1,rep,name=member" json:"member,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *LabelSet) Reset() { *m = LabelSet{} }
func (m *LabelSet) String() string { return proto.CompactTextString(m) }
func (*LabelSet) ProtoMessage() {}
func (m *LabelSet) GetMember() []*LabelPair {
if m != nil {
return m.Member
}
return nil
}
// The default LevelDB comparator sorts not only lexicographically, but also by
// key length (which takes precedence). Thus, no variable-length fields may be
// introduced into the key definition below.
type SampleKey struct {
Fingerprint *Fingerprint `protobuf:"bytes,1,opt,name=fingerprint" json:"fingerprint,omitempty"`
Timestamp []byte `protobuf:"bytes,2,opt,name=timestamp" json:"timestamp,omitempty"`
LastTimestamp *int64 `protobuf:"fixed64,3,opt,name=last_timestamp" json:"last_timestamp,omitempty"`
SampleCount *uint32 `protobuf:"fixed32,4,opt,name=sample_count" json:"sample_count,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *SampleKey) Reset() { *m = SampleKey{} }
func (m *SampleKey) String() string { return proto.CompactTextString(m) }
func (*SampleKey) ProtoMessage() {}
func (m *SampleKey) GetFingerprint() *Fingerprint {
if m != nil {
return m.Fingerprint
}
return nil
}
func (m *SampleKey) GetTimestamp() []byte {
if m != nil {
return m.Timestamp
}
return nil
}
func (m *SampleKey) GetLastTimestamp() int64 {
if m != nil && m.LastTimestamp != nil {
return *m.LastTimestamp
}
return 0
}
func (m *SampleKey) GetSampleCount() uint32 {
if m != nil && m.SampleCount != nil {
return *m.SampleCount
}
return 0
}
type MembershipIndexValue struct {
XXX_unrecognized []byte `json:"-"`
}
func (m *MembershipIndexValue) Reset() { *m = MembershipIndexValue{} }
func (m *MembershipIndexValue) String() string { return proto.CompactTextString(m) }
func (*MembershipIndexValue) ProtoMessage() {}
type MetricHighWatermark struct {
Timestamp *int64 `protobuf:"varint,1,opt,name=timestamp" json:"timestamp,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *MetricHighWatermark) Reset() { *m = MetricHighWatermark{} }
func (m *MetricHighWatermark) String() string { return proto.CompactTextString(m) }
func (*MetricHighWatermark) ProtoMessage() {}
func (m *MetricHighWatermark) GetTimestamp() int64 {
if m != nil && m.Timestamp != nil {
return *m.Timestamp
}
return 0
}
// CompactionProcessorDefinition models a curation process across the sample
// corpus that ensures that sparse samples are compacted into larger groups.
type CompactionProcessorDefinition struct {
// minimum_group_size identifies how minimally samples should be grouped
// together to write a new samples chunk.
MinimumGroupSize *uint32 `protobuf:"varint,1,opt,name=minimum_group_size" json:"minimum_group_size,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CompactionProcessorDefinition) Reset() { *m = CompactionProcessorDefinition{} }
func (m *CompactionProcessorDefinition) String() string { return proto.CompactTextString(m) }
func (*CompactionProcessorDefinition) ProtoMessage() {}
func (m *CompactionProcessorDefinition) GetMinimumGroupSize() uint32 {
if m != nil && m.MinimumGroupSize != nil {
return *m.MinimumGroupSize
}
return 0
}
// CurationKey models the state of curation for a given metric fingerprint and
// its associated samples. The time series database only knows about compaction
// and resampling behaviors that are explicitly defined to it in its runtime
// configuration, meaning it never scans on-disk tables for CurationKey
// policies; rather, it looks up via the CurationKey tuple to find out what the
// effectuation state for a given metric fingerprint is.
//
// For instance, how far along a rule for (Fingerprint A, Samples Older Than
// B, and Curation Processor) has been effectuated on-disk.
type CurationKey struct {
// fingerprint identifies the fingerprint for the given policy.
Fingerprint *Fingerprint `protobuf:"bytes,1,opt,name=fingerprint" json:"fingerprint,omitempty"`
// processor_message_type_name identifies the underlying message type that
// was used to encode processor_message_raw.
ProcessorMessageTypeName *string `protobuf:"bytes,2,opt,name=processor_message_type_name" json:"processor_message_type_name,omitempty"`
// processor_message_raw identifies the serialized ProcessorSignature for this
// operation.
ProcessorMessageRaw []byte `protobuf:"bytes,3,opt,name=processor_message_raw" json:"processor_message_raw,omitempty"`
// ignore_younger_than represents, in seconds relative to the start of the
// curation cycle, when the curator should stop operating. For instance, if
// the curation cycle starts at time T and the curation remark dictates that
// curation should start processing samples at time S, the curator should
// work from S until ignore_younger_than seconds before T:
//
// PAST NOW FUTURE
//
// S--------------->|----------T
// |---IYT----|
//
// [Curation Resumption Time (S), T - IYT)
IgnoreYoungerThan *int64 `protobuf:"varint,4,opt,name=ignore_younger_than" json:"ignore_younger_than,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CurationKey) Reset() { *m = CurationKey{} }
func (m *CurationKey) String() string { return proto.CompactTextString(m) }
func (*CurationKey) ProtoMessage() {}
func (m *CurationKey) GetFingerprint() *Fingerprint {
if m != nil {
return m.Fingerprint
}
return nil
}
func (m *CurationKey) GetProcessorMessageTypeName() string {
if m != nil && m.ProcessorMessageTypeName != nil {
return *m.ProcessorMessageTypeName
}
return ""
}
func (m *CurationKey) GetProcessorMessageRaw() []byte {
if m != nil {
return m.ProcessorMessageRaw
}
return nil
}
func (m *CurationKey) GetIgnoreYoungerThan() int64 {
if m != nil && m.IgnoreYoungerThan != nil {
return *m.IgnoreYoungerThan
}
return 0
}
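A minimal sketch (not part of the generated code above) of the half-open window described in the ignore_younger_than diagram: given the resumption time S, the cycle start T, and IYT, the curator operates on [S, T-IYT). Function name and types below are illustrative only.

package main

import (
	"fmt"
	"time"
)

// curationWindow returns the half-open interval [from, to) in which the
// curator may process samples, per the diagram above.
func curationWindow(resumption, cycleStart time.Time, ignoreYoungerThan time.Duration) (from, to time.Time) {
	return resumption, cycleStart.Add(-ignoreYoungerThan)
}

func main() {
	t := time.Now()
	s := t.Add(-time.Hour)
	from, to := curationWindow(s, t, 10*time.Minute)
	fmt.Println(from, to) // the curator works from S until 10m before T
}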
// CurationValue models the progress for a given CurationKey.
type CurationValue struct {
// last_completion_timestamp represents the seconds since the epoch UTC at
// which the curator last completed its duty cycle for a given metric
// fingerprint.
LastCompletionTimestamp *int64 `protobuf:"varint,1,opt,name=last_completion_timestamp" json:"last_completion_timestamp,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CurationValue) Reset() { *m = CurationValue{} }
func (m *CurationValue) String() string { return proto.CompactTextString(m) }
func (*CurationValue) ProtoMessage() {}
func (m *CurationValue) GetLastCompletionTimestamp() int64 {
if m != nil && m.LastCompletionTimestamp != nil {
return *m.LastCompletionTimestamp
}
return 0
}
// DeletionProcessorDefinition models a curation process across the sample
// corpus that deletes old values.
type DeletionProcessorDefinition struct {
XXX_unrecognized []byte `json:"-"`
}
func (m *DeletionProcessorDefinition) Reset() { *m = DeletionProcessorDefinition{} }
func (m *DeletionProcessorDefinition) String() string { return proto.CompactTextString(m) }
func (*DeletionProcessorDefinition) ProtoMessage() {}
func init() {
}

Binary file not shown.

View file

@ -23,9 +23,9 @@ import (
"time"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/utility"
)
@ -47,7 +47,7 @@ const (
)
var (
deadline = flag.Duration("alertmanager.httpDeadline", 10*time.Second, "Alert manager HTTP API timeout.")
deadline = flag.Duration("alertmanager.http-deadline", 10*time.Second, "Alert manager HTTP API timeout.")
)
// A request for sending a notification to the alert manager for a single alert
@ -81,21 +81,24 @@ type NotificationHandler struct {
// The URL of the alert manager to send notifications to.
alertmanagerUrl string
// Buffer of notifications that have not yet been sent.
pendingNotifications <-chan NotificationReqs
pendingNotifications chan NotificationReqs
// HTTP client with custom timeout settings.
httpClient httpPoster
notificationLatency *prometheus.SummaryVec
notificationsQueueLength prometheus.Gauge
notificationsQueueCapacity prometheus.Metric
stopped chan struct{}
}
// Construct a new NotificationHandler.
func NewNotificationHandler(alertmanagerUrl string, notificationReqs <-chan NotificationReqs) *NotificationHandler {
func NewNotificationHandler(alertmanagerUrl string, notificationQueueCapacity int) *NotificationHandler {
return &NotificationHandler{
alertmanagerUrl: alertmanagerUrl,
pendingNotifications: notificationReqs,
httpClient: utility.NewDeadlineClient(*deadline),
pendingNotifications: make(chan NotificationReqs, notificationQueueCapacity),
httpClient: utility.NewDeadlineClient(*deadline),
notificationLatency: prometheus.NewSummaryVec(
prometheus.SummaryOpts{
@ -119,8 +122,9 @@ func NewNotificationHandler(alertmanagerUrl string, notificationReqs <-chan Noti
nil, nil,
),
prometheus.GaugeValue,
float64(cap(notificationReqs)),
float64(notificationQueueCapacity),
),
stopped: make(chan struct{}),
}
}
@ -163,7 +167,7 @@ func (n *NotificationHandler) sendNotifications(reqs NotificationReqs) error {
return nil
}
// Continuously dispatch notifications.
// Run dispatches notifications continuously.
func (n *NotificationHandler) Run() {
for reqs := range n.pendingNotifications {
if n.alertmanagerUrl == "" {
@ -185,6 +189,35 @@ func (n *NotificationHandler) Run() {
float64(time.Since(begin) / time.Millisecond),
)
}
close(n.stopped)
}
// SubmitReqs queues the given notification requests for processing.
func (n *NotificationHandler) SubmitReqs(reqs NotificationReqs) {
n.pendingNotifications <- reqs
}
// Stop shuts down the notification handler.
func (n *NotificationHandler) Stop() {
glog.Info("Stopping notification handler...")
close(n.pendingNotifications)
<-n.stopped
glog.Info("Notification handler stopped.")
}
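A self-contained sketch of the shutdown handshake used by Run and Stop above, assuming Run is the only reader of pendingNotifications: Stop closes the input channel, the range loop in Run drains whatever is left and then closes stopped, and Stop returns once that signal arrives. Names below are illustrative, not the real types.

package main

import "fmt"

type handler struct {
	pending chan int
	stopped chan struct{}
}

func (h *handler) run() {
	// The range loop keeps draining until the channel is closed and empty.
	for req := range h.pending {
		fmt.Println("processing", req)
	}
	close(h.stopped) // signal that draining is complete
}

func (h *handler) stop() {
	close(h.pending) // forbid further submissions
	<-h.stopped      // block until run has drained everything
}

func main() {
	h := &handler{pending: make(chan int, 4), stopped: make(chan struct{})}
	go h.run()
	h.pending <- 1
	h.stop()
}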
// Describe implements prometheus.Collector.
func (n *NotificationHandler) Describe(ch chan<- *prometheus.Desc) {
n.notificationLatency.Describe(ch)
ch <- n.notificationsQueueLength.Desc()
ch <- n.notificationsQueueCapacity.Desc()
}
// Collect implements prometheus.Collector.
func (n *NotificationHandler) Collect(ch chan<- prometheus.Metric) {
n.notificationLatency.Collect(ch)
n.notificationsQueueLength.Set(float64(len(n.pendingNotifications)))
ch <- n.notificationsQueueLength
ch <- n.notificationsQueueCapacity
}
// Describe implements prometheus.Collector.

View file

@ -46,9 +46,8 @@ type testNotificationScenario struct {
}
func (s *testNotificationScenario) test(i int, t *testing.T) {
notifications := make(chan NotificationReqs)
defer close(notifications)
h := NewNotificationHandler("alertmanager_url", notifications)
h := NewNotificationHandler("alertmanager_url", 0)
defer h.Stop()
receivedPost := make(chan bool, 1)
poster := testHttpPoster{receivedPost: receivedPost}
@ -56,7 +55,7 @@ func (s *testNotificationScenario) test(i int, t *testing.T) {
go h.Run()
notifications <- NotificationReqs{
h.SubmitReqs(NotificationReqs{
{
Summary: s.summary,
Description: s.description,
@ -68,7 +67,7 @@ func (s *testNotificationScenario) test(i int, t *testing.T) {
RuleString: "Test rule string",
GeneratorURL: "prometheus_url",
},
}
})
<-receivedPost
if poster.message != s.message {

View file

@ -126,10 +126,12 @@ type Target interface {
GlobalAddress() string
// Return the target's base labels.
BaseLabels() clientmodel.LabelSet
// Merge a new externally supplied target definition (e.g. with changed base
// labels) into an old target definition for the same endpoint. Preserve
// remaining information - like health state - from the old target.
Merge(newTarget Target)
// SetBaseLabelsFrom queues a replacement of the current base labels by
// the labels of the given target. The method returns immediately after
// queuing. The actual replacement of the base labels happens
// asynchronously (but most likely before the next scrape for the target
// begins).
SetBaseLabelsFrom(Target)
// Scrape target at the specified interval.
RunScraper(extraction.Ingester, time.Duration)
// Stop scraping, synchronous.
@ -139,6 +141,9 @@ type Target interface {
}
// target is a Target that refers to a singular HTTP or HTTPS endpoint.
//
// TODO: The implementation is not yet goroutine safe, but for the web status,
// methods are called concurrently.
type target struct {
// The current health state of the target.
state TargetState
@ -146,9 +151,10 @@ type target struct {
lastError error
// The last time a scrape was attempted.
lastScrape time.Time
// Channel to signal RunScraper should stop, holds a channel
// to notify once stopped.
stopScraper chan bool
// Closing stopScraper signals that scraping should stop.
stopScraper chan struct{}
// Channel to queue base labels to be replaced.
newBaseLabels chan clientmodel.LabelSet
address string
// The deadline for HTTP or HTTPS requests against this endpoint.
@ -162,11 +168,12 @@ type target struct {
// Furnish a reasonably configured target for querying.
func NewTarget(address string, deadline time.Duration, baseLabels clientmodel.LabelSet) Target {
target := &target{
address: address,
Deadline: deadline,
baseLabels: baseLabels,
httpClient: utility.NewDeadlineClient(deadline),
stopScraper: make(chan bool),
address: address,
Deadline: deadline,
baseLabels: baseLabels,
httpClient: utility.NewDeadlineClient(deadline),
stopScraper: make(chan struct{}),
newBaseLabels: make(chan clientmodel.LabelSet, 1),
}
return target
@ -197,11 +204,25 @@ func (t *target) recordScrapeHealth(ingester extraction.Ingester, timestamp clie
})
}
// RunScraper implements Target.
func (t *target) RunScraper(ingester extraction.Ingester, interval time.Duration) {
defer func() {
// Drain t.newBaseLabels so that senders do not block during shutdown.
for {
select {
case <-t.newBaseLabels:
// Do nothing.
default:
return
}
}
}()
jitterTimer := time.NewTimer(time.Duration(float64(interval) * rand.Float64()))
select {
case <-jitterTimer.C:
case <-t.stopScraper:
jitterTimer.Stop()
return
}
jitterTimer.Stop()
@ -211,20 +232,39 @@ func (t *target) RunScraper(ingester extraction.Ingester, interval time.Duration
t.lastScrape = time.Now()
t.scrape(ingester)
// Explanation of the contraption below:
//
// In case t.newBaseLabels or t.stopScraper have something to receive,
// we want to read from those channels rather than starting a new scrape
// (which might take very long). That's why the outer select has no
// ticker.C. Should neither t.newBaseLabels nor t.stopScraper have
// anything to receive, we go into the inner select, where ticker.C is
// in the mix.
for {
select {
case <-ticker.C:
targetIntervalLength.WithLabelValues(interval.String()).Observe(float64(time.Since(t.lastScrape) / time.Second))
t.lastScrape = time.Now()
t.scrape(ingester)
case newBaseLabels := <-t.newBaseLabels:
t.baseLabels = newBaseLabels
case <-t.stopScraper:
return
default:
select {
case newBaseLabels := <-t.newBaseLabels:
t.baseLabels = newBaseLabels
case <-t.stopScraper:
return
case <-ticker.C:
targetIntervalLength.WithLabelValues(interval.String()).Observe(float64(time.Since(t.lastScrape) / time.Second))
t.lastScrape = time.Now()
t.scrape(ingester)
}
}
}
}
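The two-level select above can be isolated into a small runnable sketch (channel names here are hypothetical): the outer select with a default case checks only the control channels, so a pending base-label update or stop signal always wins over a tick; only when neither is ready does the inner select also wait on the ticker.

package main

import (
	"fmt"
	"time"
)

func main() {
	control := make(chan string, 1)
	stop := make(chan struct{})
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()

	go func() {
		control <- "new labels"
		time.Sleep(50 * time.Millisecond)
		close(stop)
	}()

	for {
		// Outer select: control channels only, so a pending control
		// message or stop signal is handled before any tick.
		select {
		case msg := <-control:
			fmt.Println("control:", msg)
		case <-stop:
			fmt.Println("stopped")
			return
		default:
			// Inner select: nothing pending on the control channels,
			// so also wait for the next tick.
			select {
			case msg := <-control:
				fmt.Println("control:", msg)
			case <-stop:
				fmt.Println("stopped")
				return
			case <-ticker.C:
				fmt.Println("scrape")
			}
		}
	}
}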
// StopScraper implements Target.
func (t *target) StopScraper() {
t.stopScraper <- true
close(t.stopScraper)
}
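Closing stopScraper (instead of sending a value, as the old chan bool did) is what lets the signal reach any number of receivers. A short sketch of the idiom, with illustrative names:

package main

import (
	"fmt"
	"sync"
)

func main() {
	stop := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-stop // every goroutine unblocks when stop is closed
			fmt.Println("worker", id, "stopping")
		}(i)
	}
	close(stop) // one close reaches every waiter; a send would reach only one
	wg.Wait()
}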
const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,application/json;schema="prometheus/telemetry";version=0.0.2;q=0.2,*/*;q=0.1`
@ -270,8 +310,8 @@ func (t *target) scrape(ingester extraction.Ingester) (err error) {
return err
}
// XXX: This is a wart; we need to handle this more gracefully down the
// road, especially once we have service discovery support.
// TODO: This is a wart; we need to handle this more gracefully down the
// road, especially once we have service discovery support.
baseLabels := clientmodel.LabelSet{InstanceLabel: clientmodel.LabelValue(t.Address())}
for baseLabel, baseValue := range t.baseLabels {
baseLabels[baseLabel] = baseValue
@ -289,22 +329,27 @@ func (t *target) scrape(ingester extraction.Ingester) (err error) {
return processor.ProcessSingle(resp.Body, i, processOptions)
}
// LastError implements Target.
func (t *target) LastError() error {
return t.lastError
}
// State implements Target.
func (t *target) State() TargetState {
return t.state
}
// LastScrape implements Target.
func (t *target) LastScrape() time.Time {
return t.lastScrape
}
// Address implements Target.
func (t *target) Address() string {
return t.address
}
// GlobalAddress implements Target.
func (t *target) GlobalAddress() string {
address := t.address
hostname, err := os.Hostname()
@ -318,18 +363,17 @@ func (t *target) GlobalAddress() string {
return address
}
// BaseLabels implements Target.
func (t *target) BaseLabels() clientmodel.LabelSet {
return t.baseLabels
}
// Merge a new externally supplied target definition (e.g. with changed base
// labels) into an old target definition for the same endpoint. Preserve
// remaining information - like health state - from the old target.
func (t *target) Merge(newTarget Target) {
// SetBaseLabelsFrom implements Target.
func (t *target) SetBaseLabelsFrom(newTarget Target) {
if t.Address() != newTarget.Address() {
panic("targets don't refer to the same endpoint")
}
t.baseLabels = newTarget.BaseLabels()
t.newBaseLabels <- newTarget.BaseLabels()
}
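Note that newBaseLabels is created with a buffer of one (see NewTarget above), so a single queued label update does not block the caller. The doc comment's promise that SetBaseLabelsFrom returns immediately after queuing therefore holds as long as at most one update is pending; a second send before the scraper loop has consumed the first would block until it is drained.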
type targets []Target

View file

@ -149,7 +149,7 @@ func TestTargetRunScraperScrapes(t *testing.T) {
state: UNKNOWN,
address: "bad schema",
httpClient: utility.NewDeadlineClient(0),
stopScraper: make(chan bool, 1),
stopScraper: make(chan struct{}),
}
go testTarget.RunScraper(nopIngester{}, time.Duration(time.Millisecond))

View file

@ -14,6 +14,7 @@
package retrieval
import (
"sync"
"github.com/golang/glog"
"github.com/prometheus/client_golang/extraction"
@ -57,7 +58,7 @@ func (m *targetManager) TargetPoolForJob(job config.JobConfig) *TargetPool {
glog.Infof("Pool for job %s does not exist; creating and starting...", job.GetName())
m.poolsByJob[job.GetName()] = targetPool
// BUG(all): Investigate whether this auto-goroutine creation is desired.
// TODO: Investigate whether this auto-goroutine creation is desired.
go targetPool.Run()
}
@ -105,13 +106,22 @@ func (m *targetManager) AddTargetsFromConfig(config config.Config) {
}
func (m *targetManager) Stop() {
glog.Info("Target manager exiting...")
for _, p := range m.poolsByJob {
p.Stop()
glog.Info("Stopping target manager...")
var wg sync.WaitGroup
for j, p := range m.poolsByJob {
wg.Add(1)
go func(j string, p *TargetPool) {
defer wg.Done()
glog.Infof("Stopping target pool %q...", j)
p.Stop()
glog.Infof("Target pool %q stopped.", j)
}(j, p)
}
wg.Wait()
glog.Info("Target manager stopped.")
}
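The Stop method above fans out one goroutine per pool and waits for all of them with a sync.WaitGroup, so pools shut down in parallel rather than one after another. A minimal sketch of the pattern (names and sleeps illustrative):

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	pools := map[string]time.Duration{"a": 30 * time.Millisecond, "b": 10 * time.Millisecond}
	var wg sync.WaitGroup
	for name, d := range pools {
		wg.Add(1)
		go func(name string, d time.Duration) {
			defer wg.Done()
			time.Sleep(d) // stand-in for p.Stop()
			fmt.Println("pool", name, "stopped")
		}(name, d)
	}
	wg.Wait() // total wait is the slowest pool, not the sum
	fmt.Println("all pools stopped")
}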
// XXX: Not really thread-safe. Only used in /status page for now.
// TODO: Not goroutine-safe. Only used in /status page for now.
func (m *targetManager) Pools() map[string]*TargetPool {
return m.poolsByJob
}

View file

@ -76,7 +76,7 @@ func (t fakeTarget) State() TargetState {
return ALIVE
}
func (t *fakeTarget) Merge(newTarget Target) {}
func (t *fakeTarget) SetBaseLabelsFrom(newTarget Target) {}
func testTargetManager(t testing.TB) {
targetManager := NewTargetManager(nopIngester{})

View file

@ -30,7 +30,7 @@ const (
type TargetPool struct {
sync.RWMutex
done chan chan bool
done chan chan struct{}
manager TargetManager
targetsByAddress map[string]Target
interval time.Duration
@ -48,7 +48,7 @@ func NewTargetPool(m TargetManager, p TargetProvider, ing extraction.Ingester, i
targetsByAddress: make(map[string]Target),
addTargetQueue: make(chan Target, targetAddQueueSize),
targetProvider: p,
done: make(chan chan bool),
done: make(chan chan struct{}),
}
}
@ -71,15 +71,14 @@ func (p *TargetPool) Run() {
p.addTarget(newTarget)
case stopped := <-p.done:
p.ReplaceTargets([]Target{})
glog.Info("TargetPool exiting...")
stopped <- true
close(stopped)
return
}
}
}
func (p TargetPool) Stop() {
stopped := make(chan bool)
func (p *TargetPool) Stop() {
stopped := make(chan struct{})
p.done <- stopped
<-stopped
}
@ -108,20 +107,27 @@ func (p *TargetPool) ReplaceTargets(newTargets []Target) {
newTargetAddresses.Add(newTarget.Address())
oldTarget, ok := p.targetsByAddress[newTarget.Address()]
if ok {
oldTarget.Merge(newTarget)
oldTarget.SetBaseLabelsFrom(newTarget)
} else {
p.targetsByAddress[newTarget.Address()] = newTarget
go newTarget.RunScraper(p.ingester, p.interval)
}
}
// Stop any targets no longer present.
var wg sync.WaitGroup
for k, oldTarget := range p.targetsByAddress {
if !newTargetAddresses.Has(k) {
glog.V(1).Info("Stopping scraper for target ", k)
oldTarget.StopScraper()
delete(p.targetsByAddress, k)
wg.Add(1)
go func(k string, oldTarget Target) {
defer wg.Done()
glog.V(1).Infof("Stopping scraper for target %s...", k)
oldTarget.StopScraper()
delete(p.targetsByAddress, k)
glog.V(1).Infof("Scraper for target %s stopped.", k)
}(k, oldTarget)
}
}
wg.Wait()
}
func (p *TargetPool) Targets() []Target {

View file

@ -14,8 +14,11 @@
package retrieval
import (
"net/http"
"testing"
"time"
clientmodel "github.com/prometheus/client_golang/model"
)
func testTargetPool(t testing.TB) {
@ -46,12 +49,12 @@ func testTargetPool(t testing.TB) {
name: "single element",
inputs: []input{
{
address: "http://single.com",
address: "single1",
},
},
outputs: []output{
{
address: "http://single.com",
address: "single1",
},
},
},
@ -59,18 +62,18 @@ func testTargetPool(t testing.TB) {
name: "plural schedules",
inputs: []input{
{
address: "http://plural.net",
address: "plural1",
},
{
address: "http://plural.com",
address: "plural2",
},
},
outputs: []output{
{
address: "http://plural.net",
address: "plural1",
},
{
address: "http://plural.com",
address: "plural2",
},
},
},
@ -81,9 +84,10 @@ func testTargetPool(t testing.TB) {
for _, input := range scenario.inputs {
target := target{
address: input.address,
address: input.address,
newBaseLabels: make(chan clientmodel.LabelSet, 1),
httpClient: &http.Client{},
}
pool.addTarget(&target)
}
@ -91,11 +95,8 @@ func testTargetPool(t testing.TB) {
t.Errorf("%s %d. expected TargetPool size to be %d but was %d", scenario.name, i, len(scenario.outputs), len(pool.targetsByAddress))
} else {
for j, output := range scenario.outputs {
target := pool.Targets()[j]
if target.Address() != output.address {
if target, ok := pool.targetsByAddress[output.address]; !ok {
t.Errorf("%s %d.%d. expected Target address to be %s but was %s", scenario.name, i, j, output.address, target.Address())
}
}
@ -113,30 +114,34 @@ func TestTargetPool(t *testing.T) {
func TestTargetPoolReplaceTargets(t *testing.T) {
pool := NewTargetPool(nil, nil, nopIngester{}, time.Duration(1))
oldTarget1 := &target{
address: "example1",
state: UNREACHABLE,
stopScraper: make(chan bool, 1),
address: "example1",
state: UNREACHABLE,
stopScraper: make(chan struct{}),
newBaseLabels: make(chan clientmodel.LabelSet, 1),
httpClient: &http.Client{},
}
oldTarget2 := &target{
address: "example2",
state: UNREACHABLE,
stopScraper: make(chan bool, 1),
address: "example2",
state: UNREACHABLE,
stopScraper: make(chan struct{}),
newBaseLabels: make(chan clientmodel.LabelSet, 1),
httpClient: &http.Client{},
}
newTarget1 := &target{
address: "example1",
state: ALIVE,
stopScraper: make(chan bool, 1),
address: "example1",
state: ALIVE,
stopScraper: make(chan struct{}),
newBaseLabels: make(chan clientmodel.LabelSet, 1),
httpClient: &http.Client{},
}
newTarget2 := &target{
address: "example3",
state: ALIVE,
stopScraper: make(chan bool, 1),
address: "example3",
state: ALIVE,
stopScraper: make(chan struct{}),
newBaseLabels: make(chan clientmodel.LabelSet, 1),
httpClient: &http.Client{},
}
oldTarget1.StopScraper()
oldTarget2.StopScraper()
newTarget2.StopScraper()
pool.addTarget(oldTarget1)
pool.addTarget(oldTarget2)

View file

@ -14,10 +14,11 @@
all: parser.y.go lexer.l.go
parser.y.go: parser.y
go tool yacc -o parser.y.go -v "" parser.y
$(GOCC) tool yacc -o parser.y.go -v "" parser.y
lexer.l.go: parser.y.go lexer.l
# This is golex from https://github.com/cznic/golex.
$(GO_GET) github.com/cznic/golex
golex -o="lexer.l.go" lexer.l
clean:

View file

@ -23,7 +23,7 @@ import (
"github.com/prometheus/prometheus/rules/ast"
"github.com/prometheus/prometheus/stats"
"github.com/prometheus/prometheus/storage/metric"
"github.com/prometheus/prometheus/storage/local"
"github.com/prometheus/prometheus/utility"
)
@ -118,11 +118,11 @@ func (rule *AlertingRule) Name() string {
return rule.name
}
func (rule *AlertingRule) EvalRaw(timestamp clientmodel.Timestamp, storage metric.PreloadingPersistence) (ast.Vector, error) {
func (rule *AlertingRule) EvalRaw(timestamp clientmodel.Timestamp, storage local.Storage) (ast.Vector, error) {
return ast.EvalVectorInstant(rule.Vector, timestamp, storage, stats.NewTimerGroup())
}
func (rule *AlertingRule) Eval(timestamp clientmodel.Timestamp, storage metric.PreloadingPersistence) (ast.Vector, error) {
func (rule *AlertingRule) Eval(timestamp clientmodel.Timestamp, storage local.Storage) (ast.Vector, error) {
// Get the raw value of the rule expression.
exprResult, err := rule.EvalRaw(timestamp, storage)
if err != nil {

View file

@ -15,20 +15,22 @@ package ast
import (
"errors"
"flag"
"fmt"
"hash/fnv"
"math"
"sort"
"time"
"github.com/golang/glog"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/stats"
"github.com/prometheus/prometheus/storage/local"
"github.com/prometheus/prometheus/storage/metric"
)
var stalenessDelta = flag.Duration("query.staleness-delta", 300*time.Second, "Staleness delta allowance during expression evaluations.")
// ----------------------------------------------------------------------------
// Raw data value types.
@ -81,6 +83,17 @@ const (
OR
)
// shouldDropMetric indicates whether the metric name should be dropped after
// applying this operator to a vector.
func (opType BinOpType) shouldDropMetric() bool {
switch opType {
case ADD, SUB, MUL, DIV, MOD:
return true
default:
return false
}
}
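In practice this means that for the arithmetic operators the result of, say, foo + 1 is no longer the metric foo, so the __name__ label is deleted from the result samples (see the delete calls in VectorArithExpr.Eval below), while the remaining operator types keep the metric name intact.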
// AggrType is an enum for aggregation types.
type AggrType int
@ -114,7 +127,7 @@ type Node interface {
type ScalarNode interface {
Node
// Eval evaluates and returns the value of the scalar represented by this node.
Eval(timestamp clientmodel.Timestamp, view *viewAdapter) clientmodel.SampleValue
Eval(timestamp clientmodel.Timestamp) clientmodel.SampleValue
}
// VectorNode is a Node for vector values.
@ -123,17 +136,17 @@ type VectorNode interface {
// Eval evaluates the node recursively and returns the result
// as a Vector (i.e. a slice of Samples all at the given
// Timestamp).
Eval(timestamp clientmodel.Timestamp, view *viewAdapter) Vector
Eval(timestamp clientmodel.Timestamp) Vector
}
// MatrixNode is a Node for matrix values.
type MatrixNode interface {
Node
// Eval evaluates the node recursively and returns the result as a Matrix.
Eval(timestamp clientmodel.Timestamp, view *viewAdapter) Matrix
Eval(timestamp clientmodel.Timestamp) Matrix
// Eval evaluates the node recursively and returns the result
// as a Matrix that only contains the boundary values.
EvalBoundaries(timestamp clientmodel.Timestamp, view *viewAdapter) Matrix
EvalBoundaries(timestamp clientmodel.Timestamp) Matrix
}
// StringNode is a Node for string values.
@ -141,7 +154,7 @@ type StringNode interface {
Node
// Eval evaluates and returns the value of the string
// represented by this node.
Eval(timestamp clientmodel.Timestamp, view *viewAdapter) string
Eval(timestamp clientmodel.Timestamp) string
}
// ----------------------------------------------------------------------------
@ -176,7 +189,11 @@ type (
// A VectorSelector represents a metric name plus labelset.
VectorSelector struct {
labelMatchers metric.LabelMatchers
// The series iterators are populated at query analysis time.
iterators map[clientmodel.Fingerprint]local.SeriesIterator
metrics map[clientmodel.Fingerprint]clientmodel.Metric
// Fingerprints are populated from label matchers at query analysis time.
// TODO: do we still need these?
fingerprints clientmodel.Fingerprints
}
@ -213,8 +230,11 @@ type (
// timerange.
MatrixSelector struct {
labelMatchers metric.LabelMatchers
// Fingerprints are populated from label matchers at query
// analysis time.
// The series iterators are populated at query analysis time.
iterators map[clientmodel.Fingerprint]local.SeriesIterator
metrics map[clientmodel.Fingerprint]clientmodel.Metric
// Fingerprints are populated from label matchers at query analysis time.
// TODO: do we still need these?
fingerprints clientmodel.Fingerprints
interval time.Duration
}
@ -308,22 +328,22 @@ func (node StringFunctionCall) Children() Nodes { return node.args }
// Eval implements the ScalarNode interface and returns the selector
// value.
func (node *ScalarLiteral) Eval(timestamp clientmodel.Timestamp, view *viewAdapter) clientmodel.SampleValue {
func (node *ScalarLiteral) Eval(timestamp clientmodel.Timestamp) clientmodel.SampleValue {
return node.value
}
// Eval implements the ScalarNode interface and returns the result of
// the expression.
func (node *ScalarArithExpr) Eval(timestamp clientmodel.Timestamp, view *viewAdapter) clientmodel.SampleValue {
lhs := node.lhs.Eval(timestamp, view)
rhs := node.rhs.Eval(timestamp, view)
func (node *ScalarArithExpr) Eval(timestamp clientmodel.Timestamp) clientmodel.SampleValue {
lhs := node.lhs.Eval(timestamp)
rhs := node.rhs.Eval(timestamp)
return evalScalarBinop(node.opType, lhs, rhs)
}
// Eval implements the ScalarNode interface and returns the result of
// the function call.
func (node *ScalarFunctionCall) Eval(timestamp clientmodel.Timestamp, view *viewAdapter) clientmodel.SampleValue {
return node.function.callFn(timestamp, view, node.args).(clientmodel.SampleValue)
func (node *ScalarFunctionCall) Eval(timestamp clientmodel.Timestamp) clientmodel.SampleValue {
return node.function.callFn(timestamp, node.args).(clientmodel.SampleValue)
}
func (node *VectorAggregation) labelsToGroupingKey(labels clientmodel.Metric) uint64 {
@ -357,33 +377,34 @@ func labelsToKey(labels clientmodel.Metric) uint64 {
}
// EvalVectorInstant evaluates a VectorNode with an instant query.
func EvalVectorInstant(node VectorNode, timestamp clientmodel.Timestamp, storage metric.PreloadingPersistence, queryStats *stats.TimerGroup) (vector Vector, err error) {
viewAdapter, err := viewAdapterForInstantQuery(node, timestamp, storage, queryStats)
func EvalVectorInstant(node VectorNode, timestamp clientmodel.Timestamp, storage local.Storage, queryStats *stats.TimerGroup) (Vector, error) {
closer, err := prepareInstantQuery(node, timestamp, storage, queryStats)
if err != nil {
return
return nil, err
}
vector = node.Eval(timestamp, viewAdapter)
return
defer closer.Close()
return node.Eval(timestamp), nil
}
// EvalVectorRange evaluates a VectorNode with a range query.
func EvalVectorRange(node VectorNode, start clientmodel.Timestamp, end clientmodel.Timestamp, interval time.Duration, storage metric.PreloadingPersistence, queryStats *stats.TimerGroup) (Matrix, error) {
func EvalVectorRange(node VectorNode, start clientmodel.Timestamp, end clientmodel.Timestamp, interval time.Duration, storage local.Storage, queryStats *stats.TimerGroup) (Matrix, error) {
// Explicitly initialize to an empty matrix since a nil Matrix encodes to
// null in JSON.
matrix := Matrix{}
viewTimer := queryStats.GetTimer(stats.TotalViewBuildingTime).Start()
viewAdapter, err := viewAdapterForRangeQuery(node, start, end, interval, storage, queryStats)
viewTimer.Stop()
prepareTimer := queryStats.GetTimer(stats.TotalQueryPreparationTime).Start()
closer, err := prepareRangeQuery(node, start, end, interval, storage, queryStats)
prepareTimer.Stop()
if err != nil {
return nil, err
}
defer closer.Close()
// TODO implement watchdog timer for long-running queries.
evalTimer := queryStats.GetTimer(stats.InnerEvalTime).Start()
sampleSets := map[uint64]*metric.SampleSet{}
for t := start; t.Before(end); t = t.Add(interval) {
vector := node.Eval(t, viewAdapter)
vector := node.Eval(t)
for _, sample := range vector {
samplePair := metric.SamplePair{
Value: sample.Value,
@ -444,8 +465,8 @@ func (node *VectorAggregation) groupedAggregationsToVector(aggregations map[uint
// Eval implements the VectorNode interface and returns the aggregated
// Vector.
func (node *VectorAggregation) Eval(timestamp clientmodel.Timestamp, view *viewAdapter) Vector {
vector := node.vector.Eval(timestamp, view)
func (node *VectorAggregation) Eval(timestamp clientmodel.Timestamp) Vector {
vector := node.vector.Eval(timestamp)
result := map[uint64]*groupedAggregation{}
for _, sample := range vector {
groupingKey := node.labelsToGroupingKey(sample.Metric)
@ -477,8 +498,8 @@ func (node *VectorAggregation) Eval(timestamp clientmodel.Timestamp, view *viewA
m := clientmodel.Metric{}
if node.keepExtraLabels {
m = sample.Metric
delete(m, clientmodel.MetricNameLabel)
} else {
m[clientmodel.MetricNameLabel] = sample.Metric[clientmodel.MetricNameLabel]
for _, l := range node.groupBy {
if v, ok := sample.Metric[l]; ok {
m[l] = v
@ -498,19 +519,91 @@ func (node *VectorAggregation) Eval(timestamp clientmodel.Timestamp, view *viewA
// Eval implements the VectorNode interface and returns the value of
// the selector.
func (node *VectorSelector) Eval(timestamp clientmodel.Timestamp, view *viewAdapter) Vector {
values, err := view.GetValueAtTime(node.fingerprints, timestamp)
if err != nil {
glog.Error("Unable to get vector values: ", err)
return Vector{}
func (node *VectorSelector) Eval(timestamp clientmodel.Timestamp) Vector {
//// timer := v.stats.GetTimer(stats.GetValueAtTimeTime).Start()
samples := Vector{}
for fp, it := range node.iterators {
sampleCandidates := it.GetValueAtTime(timestamp)
samplePair := chooseClosestSample(sampleCandidates, timestamp)
if samplePair != nil {
samples = append(samples, &clientmodel.Sample{
Metric: node.metrics[fp], // TODO: need copy here because downstream can modify!
Value: samplePair.Value,
Timestamp: timestamp,
})
}
}
//// timer.Stop()
return samples
}
// chooseClosestSample chooses the closest sample of a list of samples
// surrounding a given target time. If samples are found both before and after
// the target time, the sample value is interpolated between these. Otherwise,
// the single closest sample is returned verbatim.
func chooseClosestSample(samples metric.Values, timestamp clientmodel.Timestamp) *metric.SamplePair {
var closestBefore *metric.SamplePair
var closestAfter *metric.SamplePair
for _, candidate := range samples {
delta := candidate.Timestamp.Sub(timestamp)
// Samples before target time.
if delta < 0 {
// Ignore samples outside of staleness policy window.
if -delta > *stalenessDelta {
continue
}
// Ignore samples that are farther away than what we've seen before.
if closestBefore != nil && candidate.Timestamp.Before(closestBefore.Timestamp) {
continue
}
sample := candidate
closestBefore = &sample
}
// Samples after target time.
if delta >= 0 {
// Ignore samples outside of staleness policy window.
if delta > *stalenessDelta {
continue
}
// Ignore samples that are farther away than samples we've seen before.
if closestAfter != nil && candidate.Timestamp.After(closestAfter.Timestamp) {
continue
}
sample := candidate
closestAfter = &sample
}
}
switch {
case closestBefore != nil && closestAfter != nil:
return interpolateSamples(closestBefore, closestAfter, timestamp)
case closestBefore != nil:
return closestBefore
default:
return closestAfter
}
}
// interpolateSamples interpolates a value at a target time between two
// provided sample pairs.
func interpolateSamples(first, second *metric.SamplePair, timestamp clientmodel.Timestamp) *metric.SamplePair {
dv := second.Value - first.Value
dt := second.Timestamp.Sub(first.Timestamp)
dDt := dv / clientmodel.SampleValue(dt)
offset := clientmodel.SampleValue(timestamp.Sub(first.Timestamp))
return &metric.SamplePair{
Value: first.Value + (offset * dDt),
Timestamp: timestamp,
}
return values
}
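A worked instance of the linear interpolation above (values chosen for illustration): between a sample (t=10s, v=1) and a sample (t=20s, v=3), the interpolated value at t=15s is 1 + (15-10) * (3-1)/(20-10) = 2. The same arithmetic as a self-contained sketch:

package main

import "fmt"

// interpolate computes v1 + (t-t1) * (v2-v1)/(t2-t1), the straight line
// through (t1,v1) and (t2,v2) evaluated at t.
func interpolate(t1, v1, t2, v2, t float64) float64 {
	return v1 + (t-t1)*(v2-v1)/(t2-t1)
}

func main() {
	fmt.Println(interpolate(10, 1, 20, 3, 15)) // prints 2
}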
// Eval implements the VectorNode interface and returns the result of
// the function call.
func (node *VectorFunctionCall) Eval(timestamp clientmodel.Timestamp, view *viewAdapter) Vector {
return node.function.callFn(timestamp, view, node.args).(Vector)
func (node *VectorFunctionCall) Eval(timestamp clientmodel.Timestamp) Vector {
return node.function.callFn(timestamp, node.args).(Vector)
}
func evalScalarBinop(opType BinOpType,
@ -626,9 +719,6 @@ func evalVectorBinop(opType BinOpType,
}
func labelsEqual(labels1, labels2 clientmodel.Metric) bool {
if len(labels1) != len(labels2) {
return false
}
for label, value := range labels1 {
if labels2[label] != value && label != clientmodel.MetricNameLabel {
return false
@ -639,39 +729,48 @@ func labelsEqual(labels1, labels2 clientmodel.Metric) bool {
// Eval implements the VectorNode interface and returns the result of
// the expression.
func (node *VectorArithExpr) Eval(timestamp clientmodel.Timestamp, view *viewAdapter) Vector {
func (node *VectorArithExpr) Eval(timestamp clientmodel.Timestamp) Vector {
result := Vector{}
if node.lhs.Type() == SCALAR && node.rhs.Type() == VECTOR {
lhs := node.lhs.(ScalarNode).Eval(timestamp, view)
rhs := node.rhs.(VectorNode).Eval(timestamp, view)
lhs := node.lhs.(ScalarNode).Eval(timestamp)
rhs := node.rhs.(VectorNode).Eval(timestamp)
for _, rhsSample := range rhs {
value, keep := evalVectorBinop(node.opType, lhs, rhsSample.Value)
if keep {
rhsSample.Value = value
if node.opType.shouldDropMetric() {
delete(rhsSample.Metric, clientmodel.MetricNameLabel)
}
result = append(result, rhsSample)
}
}
return result
} else if node.lhs.Type() == VECTOR && node.rhs.Type() == SCALAR {
lhs := node.lhs.(VectorNode).Eval(timestamp, view)
rhs := node.rhs.(ScalarNode).Eval(timestamp, view)
lhs := node.lhs.(VectorNode).Eval(timestamp)
rhs := node.rhs.(ScalarNode).Eval(timestamp)
for _, lhsSample := range lhs {
value, keep := evalVectorBinop(node.opType, lhsSample.Value, rhs)
if keep {
lhsSample.Value = value
if node.opType.shouldDropMetric() {
delete(lhsSample.Metric, clientmodel.MetricNameLabel)
}
result = append(result, lhsSample)
}
}
return result
} else if node.lhs.Type() == VECTOR && node.rhs.Type() == VECTOR {
lhs := node.lhs.(VectorNode).Eval(timestamp, view)
rhs := node.rhs.(VectorNode).Eval(timestamp, view)
lhs := node.lhs.(VectorNode).Eval(timestamp)
rhs := node.rhs.(VectorNode).Eval(timestamp)
for _, lhsSample := range lhs {
for _, rhsSample := range rhs {
if labelsEqual(lhsSample.Metric, rhsSample.Metric) {
value, keep := evalVectorBinop(node.opType, lhsSample.Value, rhsSample.Value)
if keep {
lhsSample.Value = value
if node.opType.shouldDropMetric() {
delete(lhsSample.Metric, clientmodel.MetricNameLabel)
}
result = append(result, lhsSample)
}
}
@ -684,32 +783,54 @@ func (node *VectorArithExpr) Eval(timestamp clientmodel.Timestamp, view *viewAda
// Eval implements the MatrixNode interface and returns the value of
// the selector.
func (node *MatrixSelector) Eval(timestamp clientmodel.Timestamp, view *viewAdapter) Matrix {
func (node *MatrixSelector) Eval(timestamp clientmodel.Timestamp) Matrix {
interval := &metric.Interval{
OldestInclusive: timestamp.Add(-node.interval),
NewestInclusive: timestamp,
}
values, err := view.GetRangeValues(node.fingerprints, interval)
if err != nil {
glog.Error("Unable to get values for vector interval: ", err)
return Matrix{}
//// timer := v.stats.GetTimer(stats.GetRangeValuesTime).Start()
sampleSets := []metric.SampleSet{}
for fp, it := range node.iterators {
samplePairs := it.GetRangeValues(*interval)
if len(samplePairs) == 0 {
continue
}
sampleSet := metric.SampleSet{
Metric: node.metrics[fp], // TODO: need copy here because downstream can modify!
Values: samplePairs,
}
sampleSets = append(sampleSets, sampleSet)
}
return values
//// timer.Stop()
return sampleSets
}
// EvalBoundaries implements the MatrixNode interface and returns the
// boundary values of the selector.
func (node *MatrixSelector) EvalBoundaries(timestamp clientmodel.Timestamp, view *viewAdapter) Matrix {
func (node *MatrixSelector) EvalBoundaries(timestamp clientmodel.Timestamp) Matrix {
interval := &metric.Interval{
OldestInclusive: timestamp.Add(-node.interval),
NewestInclusive: timestamp,
}
values, err := view.GetBoundaryValues(node.fingerprints, interval)
if err != nil {
glog.Error("Unable to get boundary values for vector interval: ", err)
return Matrix{}
//// timer := v.stats.GetTimer(stats.GetBoundaryValuesTime).Start()
sampleSets := []metric.SampleSet{}
for fp, it := range node.iterators {
samplePairs := it.GetBoundaryValues(*interval)
if len(samplePairs) == 0 {
continue
}
sampleSet := metric.SampleSet{
Metric: node.metrics[fp], // TODO: make copy of metric.
Values: samplePairs,
}
sampleSets = append(sampleSets, sampleSet)
}
return values
//// timer.Stop()
return sampleSets
}
// Len implements sort.Interface.
@ -729,14 +850,14 @@ func (matrix Matrix) Swap(i, j int) {
// Eval implements the StringNode interface and returns the value of
// the selector.
func (node *StringLiteral) Eval(timestamp clientmodel.Timestamp, view *viewAdapter) string {
func (node *StringLiteral) Eval(timestamp clientmodel.Timestamp) string {
return node.str
}
// Eval implements the StringNode interface and returns the result of
// the function call.
func (node *StringFunctionCall) Eval(timestamp clientmodel.Timestamp, view *viewAdapter) string {
return node.function.callFn(timestamp, view, node.args).(string)
func (node *StringFunctionCall) Eval(timestamp clientmodel.Timestamp) string {
return node.function.callFn(timestamp, node.args).(string)
}
// ----------------------------------------------------------------------------
@ -754,6 +875,8 @@ func NewScalarLiteral(value clientmodel.SampleValue) *ScalarLiteral {
func NewVectorSelector(m metric.LabelMatchers) *VectorSelector {
return &VectorSelector{
labelMatchers: m,
iterators: map[clientmodel.Fingerprint]local.SeriesIterator{},
metrics: map[clientmodel.Fingerprint]clientmodel.Metric{},
}
}
@ -845,6 +968,8 @@ func NewMatrixSelector(vector *VectorSelector, interval time.Duration) *MatrixSe
return &MatrixSelector{
labelMatchers: vector.labelMatchers,
interval: interval,
iterators: map[clientmodel.Fingerprint]local.SeriesIterator{},
metrics: map[clientmodel.Fingerprint]clientmodel.Metric{},
}
}

View file

@ -31,7 +31,7 @@ type Function struct {
name string
argTypes []ExprType
returnType ExprType
callFn func(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node) interface{}
callFn func(timestamp clientmodel.Timestamp, args []Node) interface{}
}
// CheckArgTypes returns a non-nil error if the number or types of
@ -74,14 +74,14 @@ func (function *Function) CheckArgTypes(args []Node) error {
}
// === time() clientmodel.SampleValue ===
func timeImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node) interface{} {
func timeImpl(timestamp clientmodel.Timestamp, args []Node) interface{} {
return clientmodel.SampleValue(timestamp.Unix())
}
// === delta(matrix MatrixNode, isCounter ScalarNode) Vector ===
func deltaImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node) interface{} {
func deltaImpl(timestamp clientmodel.Timestamp, args []Node) interface{} {
matrixNode := args[0].(MatrixNode)
isCounter := args[1].(ScalarNode).Eval(timestamp, view) > 0
isCounter := args[1].(ScalarNode).Eval(timestamp) > 0
resultVector := Vector{}
// If we treat these metrics as counters, we need to fetch all values
@ -89,9 +89,9 @@ func deltaImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node)
// I.e. if a counter resets, we want to ignore that reset.
var matrixValue Matrix
if isCounter {
matrixValue = matrixNode.Eval(timestamp, view)
matrixValue = matrixNode.Eval(timestamp)
} else {
matrixValue = matrixNode.EvalBoundaries(timestamp, view)
matrixValue = matrixNode.EvalBoundaries(timestamp)
}
for _, samples := range matrixValue {
// No sense in trying to compute a delta without at least two points. Drop
@ -133,15 +133,16 @@ func deltaImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node)
Value: resultValue,
Timestamp: timestamp,
}
delete(resultSample.Metric, clientmodel.MetricNameLabel)
resultVector = append(resultVector, resultSample)
}
return resultVector
}
// === rate(node MatrixNode) Vector ===
func rateImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node) interface{} {
func rateImpl(timestamp clientmodel.Timestamp, args []Node) interface{} {
args = append(args, &ScalarLiteral{value: 1})
vector := deltaImpl(timestamp, view, args).(Vector)
vector := deltaImpl(timestamp, args).(Vector)
// TODO: could be other types of MatrixNode in the future (right now, only
// MatrixSelector exists). Find a better way of getting the duration of a
@ -188,28 +189,28 @@ func (s reverseHeap) Less(i, j int) bool {
}
// === sort(node VectorNode) Vector ===
func sortImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node) interface{} {
byValueSorter := vectorByValueHeap(args[0].(VectorNode).Eval(timestamp, view))
func sortImpl(timestamp clientmodel.Timestamp, args []Node) interface{} {
byValueSorter := vectorByValueHeap(args[0].(VectorNode).Eval(timestamp))
sort.Sort(byValueSorter)
return Vector(byValueSorter)
}
// === sortDesc(node VectorNode) Vector ===
func sortDescImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node) interface{} {
byValueSorter := vectorByValueHeap(args[0].(VectorNode).Eval(timestamp, view))
func sortDescImpl(timestamp clientmodel.Timestamp, args []Node) interface{} {
byValueSorter := vectorByValueHeap(args[0].(VectorNode).Eval(timestamp))
sort.Sort(sort.Reverse(byValueSorter))
return Vector(byValueSorter)
}
// === topk(k ScalarNode, node VectorNode) Vector ===
func topkImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node) interface{} {
k := int(args[0].(ScalarNode).Eval(timestamp, view))
func topkImpl(timestamp clientmodel.Timestamp, args []Node) interface{} {
k := int(args[0].(ScalarNode).Eval(timestamp))
if k < 1 {
return Vector{}
}
topk := make(vectorByValueHeap, 0, k)
vector := args[1].(VectorNode).Eval(timestamp, view)
vector := args[1].(VectorNode).Eval(timestamp)
for _, el := range vector {
if len(topk) < k || topk[0].Value < el.Value {
@ -224,15 +225,15 @@ func topkImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node) i
}
// === bottomk(k ScalarNode, node VectorNode) Vector ===
func bottomkImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node) interface{} {
k := int(args[0].(ScalarNode).Eval(timestamp, view))
func bottomkImpl(timestamp clientmodel.Timestamp, args []Node) interface{} {
k := int(args[0].(ScalarNode).Eval(timestamp))
if k < 1 {
return Vector{}
}
bottomk := make(vectorByValueHeap, 0, k)
bkHeap := reverseHeap{Interface: &bottomk}
vector := args[1].(VectorNode).Eval(timestamp, view)
vector := args[1].(VectorNode).Eval(timestamp)
for _, el := range vector {
if len(bottomk) < k || bottomk[0].Value > el.Value {
@ -247,8 +248,8 @@ func bottomkImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node
}
// === drop_common_labels(node VectorNode) Vector ===
func dropCommonLabelsImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node) interface{} {
vector := args[0].(VectorNode).Eval(timestamp, view)
func dropCommonLabelsImpl(timestamp clientmodel.Timestamp, args []Node) interface{} {
vector := args[0].(VectorNode).Eval(timestamp)
if len(vector) < 1 {
return Vector{}
}
@ -285,7 +286,7 @@ func dropCommonLabelsImpl(timestamp clientmodel.Timestamp, view *viewAdapter, ar
}
// === sampleVectorImpl() Vector ===
func sampleVectorImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node) interface{} {
func sampleVectorImpl(timestamp clientmodel.Timestamp, args []Node) interface{} {
return Vector{
&clientmodel.Sample{
Metric: clientmodel.Metric{
@ -358,8 +359,8 @@ func sampleVectorImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args [
}
// === scalar(node VectorNode) Scalar ===
func scalarImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node) interface{} {
v := args[0].(VectorNode).Eval(timestamp, view)
func scalarImpl(timestamp clientmodel.Timestamp, args []Node) interface{} {
v := args[0].(VectorNode).Eval(timestamp)
if len(v) != 1 {
return clientmodel.SampleValue(math.NaN())
}
@ -367,13 +368,13 @@ func scalarImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node)
}
// === count_scalar(vector VectorNode) model.SampleValue ===
func countScalarImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node) interface{} {
return clientmodel.SampleValue(len(args[0].(VectorNode).Eval(timestamp, view)))
func countScalarImpl(timestamp clientmodel.Timestamp, args []Node) interface{} {
return clientmodel.SampleValue(len(args[0].(VectorNode).Eval(timestamp)))
}
func aggrOverTime(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node, aggrFn func(metric.Values) clientmodel.SampleValue) interface{} {
func aggrOverTime(timestamp clientmodel.Timestamp, args []Node, aggrFn func(metric.Values) clientmodel.SampleValue) interface{} {
n := args[0].(MatrixNode)
matrixVal := n.Eval(timestamp, view)
matrixVal := n.Eval(timestamp)
resultVector := Vector{}
for _, el := range matrixVal {
@ -381,6 +382,7 @@ func aggrOverTime(timestamp clientmodel.Timestamp, view *viewAdapter, args []Nod
continue
}
delete(el.Metric, clientmodel.MetricNameLabel)
resultVector = append(resultVector, &clientmodel.Sample{
Metric: el.Metric,
Value: aggrFn(el.Values),
@ -391,8 +393,8 @@ func aggrOverTime(timestamp clientmodel.Timestamp, view *viewAdapter, args []Nod
}
// === avg_over_time(matrix MatrixNode) Vector ===
func avgOverTimeImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node) interface{} {
return aggrOverTime(timestamp, view, args, func(values metric.Values) clientmodel.SampleValue {
func avgOverTimeImpl(timestamp clientmodel.Timestamp, args []Node) interface{} {
return aggrOverTime(timestamp, args, func(values metric.Values) clientmodel.SampleValue {
var sum clientmodel.SampleValue
for _, v := range values {
sum += v.Value
@ -402,15 +404,15 @@ func avgOverTimeImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []
}
// === count_over_time(matrix MatrixNode) Vector ===
func countOverTimeImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node) interface{} {
return aggrOverTime(timestamp, view, args, func(values metric.Values) clientmodel.SampleValue {
func countOverTimeImpl(timestamp clientmodel.Timestamp, args []Node) interface{} {
return aggrOverTime(timestamp, args, func(values metric.Values) clientmodel.SampleValue {
return clientmodel.SampleValue(len(values))
})
}
// === max_over_time(matrix MatrixNode) Vector ===
func maxOverTimeImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node) interface{} {
return aggrOverTime(timestamp, view, args, func(values metric.Values) clientmodel.SampleValue {
func maxOverTimeImpl(timestamp clientmodel.Timestamp, args []Node) interface{} {
return aggrOverTime(timestamp, args, func(values metric.Values) clientmodel.SampleValue {
max := math.Inf(-1)
for _, v := range values {
max = math.Max(max, float64(v.Value))
@ -420,8 +422,8 @@ func maxOverTimeImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []
}
// === min_over_time(matrix MatrixNode) Vector ===
func minOverTimeImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node) interface{} {
return aggrOverTime(timestamp, view, args, func(values metric.Values) clientmodel.SampleValue {
func minOverTimeImpl(timestamp clientmodel.Timestamp, args []Node) interface{} {
return aggrOverTime(timestamp, args, func(values metric.Values) clientmodel.SampleValue {
min := math.Inf(1)
for _, v := range values {
min = math.Min(min, float64(v.Value))
@ -431,8 +433,8 @@ func minOverTimeImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []
}
// === sum_over_time(matrix MatrixNode) Vector ===
func sumOverTimeImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node) interface{} {
return aggrOverTime(timestamp, view, args, func(values metric.Values) clientmodel.SampleValue {
func sumOverTimeImpl(timestamp clientmodel.Timestamp, args []Node) interface{} {
return aggrOverTime(timestamp, args, func(values metric.Values) clientmodel.SampleValue {
var sum clientmodel.SampleValue
for _, v := range values {
sum += v.Value
@ -442,15 +444,39 @@ func sumOverTimeImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []
}
// === abs(vector VectorNode) Vector ===
func absImpl(timestamp clientmodel.Timestamp, view *viewAdapter, args []Node) interface{} {
func absImpl(timestamp clientmodel.Timestamp, args []Node) interface{} {
n := args[0].(VectorNode)
vector := n.Eval(timestamp, view)
vector := n.Eval(timestamp)
for _, el := range vector {
delete(el.Metric, clientmodel.MetricNameLabel)
el.Value = clientmodel.SampleValue(math.Abs(float64(el.Value)))
}
return vector
}
// === absent(vector VectorNode) Vector ===
func absentImpl(timestamp clientmodel.Timestamp, args []Node) interface{} {
n := args[0].(VectorNode)
if len(n.Eval(timestamp)) > 0 {
return Vector{}
}
m := clientmodel.Metric{}
if vs, ok := n.(*VectorSelector); ok {
for _, matcher := range vs.labelMatchers {
if matcher.Type == metric.Equal && matcher.Name != clientmodel.MetricNameLabel {
m[matcher.Name] = matcher.Value
}
}
}
return Vector{
&clientmodel.Sample{
Metric: m,
Value: 1,
Timestamp: timestamp,
},
}
}
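For example (hypothetical series names): if no series match up{job="api"}, then absent(up{job="api"}) evaluates to a single sample with value 1 and metric {job="api"}, because the equality matcher on job is copied into the result while the metric-name matcher is skipped; if matching series do exist, the result is an empty vector.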
var functions = map[string]*Function{
"abs": {
name: "abs",
@ -458,6 +484,12 @@ var functions = map[string]*Function{
returnType: VECTOR,
callFn: absImpl,
},
"absent": {
name: "absent",
argTypes: []ExprType{VECTOR},
returnType: VECTOR,
callFn: absentImpl,
},
"avg_over_time": {
name: "avg_over_time",
argTypes: []ExprType{MATRIX},

View file

@ -28,7 +28,7 @@ func (node emptyRangeNode) NodeTreeToDotGraph() string { return "" }
func (node emptyRangeNode) String() string { return "" }
func (node emptyRangeNode) Children() Nodes { return Nodes{} }
func (node emptyRangeNode) Eval(timestamp clientmodel.Timestamp, view *viewAdapter) Matrix {
func (node emptyRangeNode) Eval(timestamp clientmodel.Timestamp) Matrix {
return Matrix{
metric.SampleSet{
Metric: clientmodel.Metric{clientmodel.MetricNameLabel: "empty_metric"},
@ -37,7 +37,7 @@ func (node emptyRangeNode) Eval(timestamp clientmodel.Timestamp, view *viewAdapt
}
}
func (node emptyRangeNode) EvalBoundaries(timestamp clientmodel.Timestamp, view *viewAdapter) Matrix {
func (node emptyRangeNode) EvalBoundaries(timestamp clientmodel.Timestamp) Matrix {
return Matrix{
metric.SampleSet{
Metric: clientmodel.Metric{clientmodel.MetricNameLabel: "empty_metric"},
@ -48,11 +48,11 @@ func (node emptyRangeNode) EvalBoundaries(timestamp clientmodel.Timestamp, view
func TestDeltaWithEmptyElementDoesNotCrash(t *testing.T) {
now := clientmodel.Now()
vector := deltaImpl(now, nil, []Node{emptyRangeNode{}, &ScalarLiteral{value: 0}}).(Vector)
vector := deltaImpl(now, []Node{emptyRangeNode{}, &ScalarLiteral{value: 0}}).(Vector)
if len(vector) != 0 {
t.Fatalf("Expected empty result vector, got: %v", vector)
}
vector = deltaImpl(now, nil, []Node{emptyRangeNode{}, &ScalarLiteral{value: 1}}).(Vector)
vector = deltaImpl(now, []Node{emptyRangeNode{}, &ScalarLiteral{value: 1}}).(Vector)
if len(vector) != 0 {
t.Fatalf("Expected empty result vector, got: %v", vector)
}

View file

@ -23,7 +23,7 @@ import (
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/stats"
"github.com/prometheus/prometheus/storage/metric"
"github.com/prometheus/prometheus/storage/local"
"github.com/prometheus/prometheus/utility"
)
@ -151,18 +151,19 @@ func TypedValueToJSON(data interface{}, typeStr string) string {
}
// EvalToString evaluates the given node into a string of the given format.
func EvalToString(node Node, timestamp clientmodel.Timestamp, format OutputFormat, storage metric.PreloadingPersistence, queryStats *stats.TimerGroup) string {
viewTimer := queryStats.GetTimer(stats.TotalViewBuildingTime).Start()
viewAdapter, err := viewAdapterForInstantQuery(node, timestamp, storage, queryStats)
viewTimer.Stop()
func EvalToString(node Node, timestamp clientmodel.Timestamp, format OutputFormat, storage local.Storage, queryStats *stats.TimerGroup) string {
prepareTimer := queryStats.GetTimer(stats.TotalQueryPreparationTime).Start()
closer, err := prepareInstantQuery(node, timestamp, storage, queryStats)
prepareTimer.Stop()
if err != nil {
panic(err)
}
defer closer.Close()
evalTimer := queryStats.GetTimer(stats.InnerEvalTime).Start()
switch node.Type() {
case SCALAR:
scalar := node.(ScalarNode).Eval(timestamp, viewAdapter)
scalar := node.(ScalarNode).Eval(timestamp)
evalTimer.Stop()
switch format {
case TEXT:
@ -171,7 +172,7 @@ func EvalToString(node Node, timestamp clientmodel.Timestamp, format OutputForma
return TypedValueToJSON(scalar, "scalar")
}
case VECTOR:
vector := node.(VectorNode).Eval(timestamp, viewAdapter)
vector := node.(VectorNode).Eval(timestamp)
evalTimer.Stop()
switch format {
case TEXT:
@ -180,7 +181,7 @@ func EvalToString(node Node, timestamp clientmodel.Timestamp, format OutputForma
return TypedValueToJSON(vector, "vector")
}
case MATRIX:
matrix := node.(MatrixNode).Eval(timestamp, viewAdapter)
matrix := node.(MatrixNode).Eval(timestamp)
evalTimer.Stop()
switch format {
case TEXT:
@ -189,7 +190,7 @@ func EvalToString(node Node, timestamp clientmodel.Timestamp, format OutputForma
return TypedValueToJSON(matrix, "matrix")
}
case STRING:
str := node.(StringNode).Eval(timestamp, viewAdapter)
str := node.(StringNode).Eval(timestamp)
evalTimer.Stop()
switch format {
case TEXT:
@ -202,28 +203,29 @@ func EvalToString(node Node, timestamp clientmodel.Timestamp, format OutputForma
}
// EvalToVector evaluates the given node into a Vector. Matrices aren't supported.
func EvalToVector(node Node, timestamp clientmodel.Timestamp, storage metric.PreloadingPersistence, queryStats *stats.TimerGroup) (Vector, error) {
viewTimer := queryStats.GetTimer(stats.TotalViewBuildingTime).Start()
viewAdapter, err := viewAdapterForInstantQuery(node, timestamp, storage, queryStats)
viewTimer.Stop()
func EvalToVector(node Node, timestamp clientmodel.Timestamp, storage local.Storage, queryStats *stats.TimerGroup) (Vector, error) {
prepareTimer := queryStats.GetTimer(stats.TotalQueryPreparationTime).Start()
closer, err := prepareInstantQuery(node, timestamp, storage, queryStats)
prepareTimer.Stop()
if err != nil {
panic(err)
}
defer closer.Close()
evalTimer := queryStats.GetTimer(stats.InnerEvalTime).Start()
switch node.Type() {
case SCALAR:
scalar := node.(ScalarNode).Eval(timestamp, viewAdapter)
scalar := node.(ScalarNode).Eval(timestamp)
evalTimer.Stop()
return Vector{&clientmodel.Sample{Value: scalar}}, nil
case VECTOR:
vector := node.(VectorNode).Eval(timestamp, viewAdapter)
vector := node.(VectorNode).Eval(timestamp)
evalTimer.Stop()
return vector, nil
case MATRIX:
return nil, errors.New("Matrices not supported by EvalToVector")
case STRING:
str := node.(StringNode).Eval(timestamp, viewAdapter)
str := node.(StringNode).Eval(timestamp)
evalTimer.Stop()
return Vector{&clientmodel.Sample{
Metric: clientmodel.Metric{"__value__": clientmodel.LabelValue(str)}}}, nil

View file

@ -16,12 +16,10 @@ package ast
import (
"time"
"github.com/golang/glog"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/stats"
"github.com/prometheus/prometheus/storage/metric"
"github.com/prometheus/prometheus/storage/local"
)
// FullRangeMap maps the fingerprint of a full range to the duration
@ -48,13 +46,13 @@ type QueryAnalyzer struct {
IntervalRanges IntervalRangeMap
// The underlying storage to which the query will be applied. Needed for
// extracting timeseries fingerprint information during query analysis.
storage metric.Persistence
storage local.Storage
}
// NewQueryAnalyzer returns a pointer to a newly instantiated
// QueryAnalyzer. The storage is needed to extract timeseries
// fingerprint information during query analysis.
func NewQueryAnalyzer(storage metric.Persistence) *QueryAnalyzer {
func NewQueryAnalyzer(storage local.Storage) *QueryAnalyzer {
return &QueryAnalyzer{
FullRanges: FullRangeMap{},
IntervalRanges: IntervalRangeMap{},
@ -66,94 +64,122 @@ func NewQueryAnalyzer(storage metric.Persistence) *QueryAnalyzer {
func (analyzer *QueryAnalyzer) Visit(node Node) {
switch n := node.(type) {
case *VectorSelector:
fingerprints, err := analyzer.storage.GetFingerprintsForLabelMatchers(n.labelMatchers)
if err != nil {
glog.Errorf("Error getting fingerprints for label matchers %v: %v", n.labelMatchers, err)
return
}
fingerprints := analyzer.storage.GetFingerprintsForLabelMatchers(n.labelMatchers)
n.fingerprints = fingerprints
for _, fingerprint := range fingerprints {
for _, fp := range fingerprints {
// Only add the fingerprint to IntervalRanges if not yet present in FullRanges.
// Full ranges always contain more points and span more time than interval ranges.
if _, alreadyInFullRanges := analyzer.FullRanges[*fingerprint]; !alreadyInFullRanges {
analyzer.IntervalRanges[*fingerprint] = true
if _, alreadyInFullRanges := analyzer.FullRanges[fp]; !alreadyInFullRanges {
analyzer.IntervalRanges[fp] = true
}
n.metrics[fp] = analyzer.storage.GetMetricForFingerprint(fp)
}
case *MatrixSelector:
fingerprints, err := analyzer.storage.GetFingerprintsForLabelMatchers(n.labelMatchers)
if err != nil {
glog.Errorf("Error getting fingerprints for label matchers %v: %v", n.labelMatchers, err)
return
}
fingerprints := analyzer.storage.GetFingerprintsForLabelMatchers(n.labelMatchers)
n.fingerprints = fingerprints
for _, fingerprint := range fingerprints {
if analyzer.FullRanges[*fingerprint] < n.interval {
analyzer.FullRanges[*fingerprint] = n.interval
for _, fp := range fingerprints {
if analyzer.FullRanges[fp] < n.interval {
analyzer.FullRanges[fp] = n.interval
// Delete the fingerprint from IntervalRanges. Full ranges always contain
// more points and span more time than interval ranges, so we don't need
// an interval range for the same fingerprint, should we have one.
delete(analyzer.IntervalRanges, *fingerprint)
delete(analyzer.IntervalRanges, fp)
}
n.metrics[fp] = analyzer.storage.GetMetricForFingerprint(fp)
}
}
}
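The bookkeeping above maintains the invariant that a fingerprint appears in at most one of the two maps, with full ranges taking precedence over interval ranges. A reduced sketch of the same rule (simplified types, illustrative names):

package main

import (
	"fmt"
	"time"
)

func main() {
	fullRanges := map[string]time.Duration{}
	intervalRanges := map[string]bool{}

	// An instant selector only needs an interval range if no full range
	// already covers the fingerprint.
	addInstant := func(fp string) {
		if _, ok := fullRanges[fp]; !ok {
			intervalRanges[fp] = true
		}
	}
	// A range selector records the widest full range seen and evicts any
	// interval range for the same fingerprint.
	addRange := func(fp string, d time.Duration) {
		if fullRanges[fp] < d {
			fullRanges[fp] = d
			delete(intervalRanges, fp)
		}
	}

	addInstant("fp1")
	addRange("fp1", 5*time.Minute)
	addInstant("fp1") // no-op: a full range is already present
	fmt.Println(fullRanges, intervalRanges)
}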
// AnalyzeQueries walks the AST, starting at node, calling Visit on
// each node to collect fingerprints.
func (analyzer *QueryAnalyzer) AnalyzeQueries(node Node) {
type iteratorInitializer struct {
storage local.Storage
}
func (i *iteratorInitializer) Visit(node Node) {
switch n := node.(type) {
case *VectorSelector:
for _, fp := range n.fingerprints {
n.iterators[fp] = i.storage.NewIterator(fp)
}
case *MatrixSelector:
for _, fp := range n.fingerprints {
n.iterators[fp] = i.storage.NewIterator(fp)
}
}
}
func prepareInstantQuery(node Node, timestamp clientmodel.Timestamp, storage local.Storage, queryStats *stats.TimerGroup) (local.Preloader, error) {
analyzeTimer := queryStats.GetTimer(stats.QueryAnalysisTime).Start()
analyzer := NewQueryAnalyzer(storage)
Walk(analyzer, node)
}
func viewAdapterForInstantQuery(node Node, timestamp clientmodel.Timestamp, storage metric.PreloadingPersistence, queryStats *stats.TimerGroup) (*viewAdapter, error) {
analyzeTimer := queryStats.GetTimer(stats.QueryAnalysisTime).Start()
analyzer := NewQueryAnalyzer(storage)
analyzer.AnalyzeQueries(node)
analyzeTimer.Stop()
requestBuildTimer := queryStats.GetTimer(stats.ViewRequestBuildTime).Start()
viewBuilder := storage.NewViewRequestBuilder()
for fingerprint, rangeDuration := range analyzer.FullRanges {
viewBuilder.GetMetricRange(&fingerprint, timestamp.Add(-rangeDuration), timestamp)
}
for fingerprint := range analyzer.IntervalRanges {
viewBuilder.GetMetricAtTime(&fingerprint, timestamp)
}
requestBuildTimer.Stop()
buildTimer := queryStats.GetTimer(stats.InnerViewBuildingTime).Start()
view, err := viewBuilder.Execute(60*time.Second, queryStats)
buildTimer.Stop()
if err != nil {
return nil, err
}
return NewViewAdapter(view, storage, queryStats), nil
}
func viewAdapterForRangeQuery(node Node, start clientmodel.Timestamp, end clientmodel.Timestamp, interval time.Duration, storage metric.PreloadingPersistence, queryStats *stats.TimerGroup) (*viewAdapter, error) {
analyzeTimer := queryStats.GetTimer(stats.QueryAnalysisTime).Start()
analyzer := NewQueryAnalyzer(storage)
analyzer.AnalyzeQueries(node)
analyzeTimer.Stop()
requestBuildTimer := queryStats.GetTimer(stats.ViewRequestBuildTime).Start()
viewBuilder := storage.NewViewRequestBuilder()
for fingerprint, rangeDuration := range analyzer.FullRanges {
if interval < rangeDuration {
viewBuilder.GetMetricRange(&fingerprint, start.Add(-rangeDuration), end)
} else {
viewBuilder.GetMetricRangeAtInterval(&fingerprint, start.Add(-rangeDuration), end, interval, rangeDuration)
// TODO: Preloading should time out after a given duration.
preloadTimer := queryStats.GetTimer(stats.PreloadTime).Start()
p := storage.NewPreloader()
for fp, rangeDuration := range analyzer.FullRanges {
if err := p.PreloadRange(fp, timestamp.Add(-rangeDuration), timestamp, *stalenessDelta); err != nil {
p.Close()
return nil, err
}
}
for fingerprint := range analyzer.IntervalRanges {
viewBuilder.GetMetricAtInterval(&fingerprint, start, end, interval)
for fp := range analyzer.IntervalRanges {
if err := p.PreloadRange(fp, timestamp, timestamp, *stalenessDelta); err != nil {
p.Close()
return nil, err
}
}
requestBuildTimer.Stop()
preloadTimer.Stop()
buildTimer := queryStats.GetTimer(stats.InnerViewBuildingTime).Start()
view, err := viewBuilder.Execute(time.Duration(60)*time.Second, queryStats)
buildTimer.Stop()
if err != nil {
return nil, err
ii := &iteratorInitializer{
storage: storage,
}
return NewViewAdapter(view, storage, queryStats), nil
Walk(ii, node)
return p, nil
}
func prepareRangeQuery(node Node, start clientmodel.Timestamp, end clientmodel.Timestamp, interval time.Duration, storage local.Storage, queryStats *stats.TimerGroup) (local.Preloader, error) {
analyzeTimer := queryStats.GetTimer(stats.QueryAnalysisTime).Start()
analyzer := NewQueryAnalyzer(storage)
Walk(analyzer, node)
analyzeTimer.Stop()
// TODO: Preloading should time out after a given duration.
preloadTimer := queryStats.GetTimer(stats.PreloadTime).Start()
p := storage.NewPreloader()
for fp, rangeDuration := range analyzer.FullRanges {
if err := p.PreloadRange(fp, start.Add(-rangeDuration), end, *stalenessDelta); err != nil {
p.Close()
return nil, err
}
/*
if interval < rangeDuration {
if err := p.GetMetricRange(fp, end, end.Sub(start)+rangeDuration); err != nil {
p.Close()
return nil, err
}
} else {
if err := p.GetMetricRangeAtInterval(fp, start, end, interval, rangeDuration); err != nil {
p.Close()
return nil, err
}
}
*/
}
for fp := range analyzer.IntervalRanges {
if err := p.PreloadRange(fp, start, end, *stalenessDelta); err != nil {
p.Close()
return nil, err
}
}
preloadTimer.Stop()
ii := &iteratorInitializer{
storage: storage,
}
Walk(ii, node)
return p, nil
}
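Both prepare functions drive a Visitor over the AST via Walk: once with the QueryAnalyzer to collect fingerprints, then once with the iteratorInitializer to attach iterators. A minimal, self-contained sketch of that visitor pattern, using hypothetical toy node types in place of the real rules/ast ones:

package main

import "fmt"

// Node and Visitor are simplified stand-ins for the rules/ast types.
type Node interface{ children() []Node }

type Visitor interface{ Visit(node Node) }

// Walk calls v.Visit on node and then recurses into its children,
// which is all the analyzer and iterator initializer above need.
func Walk(v Visitor, node Node) {
	v.Visit(node)
	for _, c := range node.children() {
		Walk(v, c)
	}
}

type vectorSelector struct{ name string }

func (vectorSelector) children() []Node { return nil }

type sumExpr struct{ arg Node }

func (e sumExpr) children() []Node { return []Node{e.arg} }

// fingerprintCollector plays the role of QueryAnalyzer: one pass over
// the tree records which series the query touches.
type fingerprintCollector struct{ selectors []string }

func (c *fingerprintCollector) Visit(node Node) {
	if vs, ok := node.(vectorSelector); ok {
		c.selectors = append(c.selectors, vs.name)
	}
}

func main() {
	expr := sumExpr{arg: vectorSelector{name: "http_requests"}}
	c := &fingerprintCollector{}
	Walk(c, expr)
	fmt.Println(c.selectors) // [http_requests]
}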

View file

@ -13,186 +13,4 @@
package ast
import (
"flag"
"time"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/stats"
"github.com/prometheus/prometheus/storage/metric"
)
var defaultStalenessDelta = flag.Int("defaultStalenessDelta", 300, "Default staleness delta allowance in seconds during expression evaluations.")
// StalenessPolicy describes the lenience limits to apply to values
// from the materialized view.
type StalenessPolicy struct {
// Describes the inclusive limit at which individual points if requested will
// be matched and subject to interpolation.
DeltaAllowance time.Duration
}
type viewAdapter struct {
// Policy that dictates when sample values around an evaluation time are to
// be interpreted as stale.
stalenessPolicy StalenessPolicy
// AST-global storage to use for operations that are not supported by views
// (i.e. fingerprint->metric lookups).
storage metric.Persistence
// The materialized view which contains all timeseries data required for
// executing a query.
view metric.View
// The TimerGroup object in which to capture query timing statistics.
stats *stats.TimerGroup
}
// interpolateSamples interpolates a value at a target time between two
// provided sample pairs.
func interpolateSamples(first, second *metric.SamplePair, timestamp clientmodel.Timestamp) *metric.SamplePair {
dv := second.Value - first.Value
dt := second.Timestamp.Sub(first.Timestamp)
dDt := dv / clientmodel.SampleValue(dt)
offset := clientmodel.SampleValue(timestamp.Sub(first.Timestamp))
return &metric.SamplePair{
Value: first.Value + (offset * dDt),
Timestamp: timestamp,
}
}
// chooseClosestSample chooses the closest sample of a list of samples
// surrounding a given target time. If samples are found both before and after
// the target time, the sample value is interpolated between these. Otherwise,
// the single closest sample is returned verbatim.
func (v *viewAdapter) chooseClosestSample(samples metric.Values, timestamp clientmodel.Timestamp) *metric.SamplePair {
var closestBefore *metric.SamplePair
var closestAfter *metric.SamplePair
for _, candidate := range samples {
delta := candidate.Timestamp.Sub(timestamp)
// Samples before target time.
if delta < 0 {
// Ignore samples outside of staleness policy window.
if -delta > v.stalenessPolicy.DeltaAllowance {
continue
}
// Ignore samples that are farther away than what we've seen before.
if closestBefore != nil && candidate.Timestamp.Before(closestBefore.Timestamp) {
continue
}
sample := candidate
closestBefore = &sample
}
// Samples after target time.
if delta >= 0 {
// Ignore samples outside of staleness policy window.
if delta > v.stalenessPolicy.DeltaAllowance {
continue
}
// Ignore samples that are farther away than samples we've seen before.
if closestAfter != nil && candidate.Timestamp.After(closestAfter.Timestamp) {
continue
}
sample := candidate
closestAfter = &sample
}
}
switch {
case closestBefore != nil && closestAfter != nil:
return interpolateSamples(closestBefore, closestAfter, timestamp)
case closestBefore != nil:
return closestBefore
default:
return closestAfter
}
}
func (v *viewAdapter) GetValueAtTime(fingerprints clientmodel.Fingerprints, timestamp clientmodel.Timestamp) (Vector, error) {
timer := v.stats.GetTimer(stats.GetValueAtTimeTime).Start()
samples := Vector{}
for _, fingerprint := range fingerprints {
sampleCandidates := v.view.GetValueAtTime(fingerprint, timestamp)
samplePair := v.chooseClosestSample(sampleCandidates, timestamp)
m, err := v.storage.GetMetricForFingerprint(fingerprint)
if err != nil {
return nil, err
}
if samplePair != nil {
samples = append(samples, &clientmodel.Sample{
Metric: m,
Value: samplePair.Value,
Timestamp: timestamp,
})
}
}
timer.Stop()
return samples, nil
}
func (v *viewAdapter) GetBoundaryValues(fingerprints clientmodel.Fingerprints, interval *metric.Interval) ([]metric.SampleSet, error) {
timer := v.stats.GetTimer(stats.GetBoundaryValuesTime).Start()
sampleSets := []metric.SampleSet{}
for _, fingerprint := range fingerprints {
samplePairs := v.view.GetBoundaryValues(fingerprint, *interval)
if len(samplePairs) == 0 {
continue
}
// TODO: memoize/cache this.
m, err := v.storage.GetMetricForFingerprint(fingerprint)
if err != nil {
return nil, err
}
sampleSet := metric.SampleSet{
Metric: m,
Values: samplePairs,
}
sampleSets = append(sampleSets, sampleSet)
}
timer.Stop()
return sampleSets, nil
}
func (v *viewAdapter) GetRangeValues(fingerprints clientmodel.Fingerprints, interval *metric.Interval) ([]metric.SampleSet, error) {
timer := v.stats.GetTimer(stats.GetRangeValuesTime).Start()
sampleSets := []metric.SampleSet{}
for _, fingerprint := range fingerprints {
samplePairs := v.view.GetRangeValues(fingerprint, *interval)
if len(samplePairs) == 0 {
continue
}
// TODO: memoize/cache this.
m, err := v.storage.GetMetricForFingerprint(fingerprint)
if err != nil {
return nil, err
}
sampleSet := metric.SampleSet{
Metric: m,
Values: samplePairs,
}
sampleSets = append(sampleSets, sampleSet)
}
timer.Stop()
return sampleSets, nil
}
// NewViewAdapter returns an initialized view adapter with a default
// staleness policy (based on the --defaultStalenessDelta flag).
func NewViewAdapter(view metric.View, storage metric.Persistence, queryStats *stats.TimerGroup) *viewAdapter {
stalenessPolicy := StalenessPolicy{
DeltaAllowance: time.Duration(*defaultStalenessDelta) * time.Second,
}
return &viewAdapter{
stalenessPolicy: stalenessPolicy,
storage: storage,
view: view,
stats: queryStats,
}
}
// TODO: remove file.
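The interpolation this (now removed) adapter performed is plain linear interpolation between the two samples surrounding the target time. A standalone sketch, with a simplified samplePair standing in for metric.SamplePair:

package main

import "fmt"

type samplePair struct {
	timestamp int64 // milliseconds, as in clientmodel.Timestamp
	value     float64
}

// interpolate mirrors interpolateSamples above: the value at time t is
// taken from the line through the two surrounding samples. As in the
// original, it assumes first and second have distinct timestamps.
func interpolate(first, second samplePair, t int64) samplePair {
	dv := second.value - first.value
	dt := float64(second.timestamp - first.timestamp)
	offset := float64(t - first.timestamp)
	return samplePair{timestamp: t, value: first.value + offset*(dv/dt)}
}

func main() {
	a := samplePair{timestamp: 0, value: 10}
	b := samplePair{timestamp: 10000, value: 20}
	fmt.Println(interpolate(a, b, 2500).value) // 12.5
}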

View file

@ -19,6 +19,7 @@ import (
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/rules/ast"
"github.com/prometheus/prometheus/storage/local"
"github.com/prometheus/prometheus/storage/metric"
)
@ -51,7 +52,7 @@ func getTestVectorFromTestMatrix(matrix ast.Matrix) ast.Vector {
return vector
}
func storeMatrix(storage metric.Persistence, matrix ast.Matrix) (err error) {
func storeMatrix(storage local.Storage, matrix ast.Matrix) {
pendingSamples := clientmodel.Samples{}
for _, sampleSet := range matrix {
for _, sample := range sampleSet.Values {
@ -62,8 +63,8 @@ func storeMatrix(storage metric.Persistence, matrix ast.Matrix) (err error) {
})
}
}
err = storage.AppendSamples(pendingSamples)
return
storage.AppendSamples(pendingSamples)
storage.WaitForIndexing()
}
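The added WaitForIndexing call matters because the new storage appends asynchronously; without it a test could query before its samples are visible. A toy sketch of the pattern (toyStorage is a hypothetical stand-in, not the real local.Storage):

package main

import (
	"fmt"
	"sync"
)

// toyStorage shows why tests call WaitForIndexing: appends are handed
// off to a background goroutine, so a test must block until indexing
// has caught up before it queries.
type toyStorage struct {
	wg      sync.WaitGroup
	mtx     sync.Mutex
	indexed []float64
}

func (s *toyStorage) AppendSamples(samples []float64) {
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		s.mtx.Lock()
		defer s.mtx.Unlock()
		s.indexed = append(s.indexed, samples...)
	}()
}

func (s *toyStorage) WaitForIndexing() { s.wg.Wait() }

func main() {
	s := &toyStorage{}
	s.AppendSamples([]float64{1, 2, 3})
	s.WaitForIndexing()
	fmt.Println(len(s.indexed)) // 3, guaranteed only after the wait
}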
var testMatrix = ast.Matrix{

View file

@ -27,7 +27,7 @@ import (
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/notification"
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/storage/metric"
"github.com/prometheus/prometheus/storage/local"
"github.com/prometheus/prometheus/templates"
)
@ -83,20 +83,20 @@ type ruleManager struct {
done chan bool
interval time.Duration
storage metric.PreloadingPersistence
storage local.Storage
results chan<- *extraction.Result
notifications chan<- notification.NotificationReqs
results chan<- *extraction.Result
notificationHandler *notification.NotificationHandler
prometheusUrl string
}
type RuleManagerOptions struct {
EvaluationInterval time.Duration
Storage metric.PreloadingPersistence
Storage local.Storage
Notifications chan<- notification.NotificationReqs
Results chan<- *extraction.Result
NotificationHandler *notification.NotificationHandler
Results chan<- *extraction.Result
PrometheusUrl string
}
@ -106,11 +106,11 @@ func NewRuleManager(o *RuleManagerOptions) RuleManager {
rules: []rules.Rule{},
done: make(chan bool),
interval: o.EvaluationInterval,
storage: o.Storage,
results: o.Results,
notifications: o.Notifications,
prometheusUrl: o.PrometheusUrl,
interval: o.EvaluationInterval,
storage: o.Storage,
results: o.Results,
notificationHandler: o.NotificationHandler,
prometheusUrl: o.PrometheusUrl,
}
return manager
}
@ -126,17 +126,15 @@ func (m *ruleManager) Run() {
m.runIteration(m.results)
iterationDuration.Observe(float64(time.Since(start) / time.Millisecond))
case <-m.done:
glog.Info("rules.Rule manager exiting...")
glog.Info("Rule manager stopped.")
return
}
}
}
func (m *ruleManager) Stop() {
select {
case m.done <- true:
default:
}
glog.Info("Stopping rule manager...")
m.done <- true
}
func (m *ruleManager) queueAlertNotifications(rule *rules.AlertingRule, timestamp clientmodel.Timestamp) {
@ -190,7 +188,7 @@ func (m *ruleManager) queueAlertNotifications(rule *rules.AlertingRule, timestam
GeneratorURL: m.prometheusUrl + rules.GraphLinkForExpression(rule.Vector.String()),
})
}
m.notifications <- notifications
m.notificationHandler.SubmitReqs(notifications)
}
func (m *ruleManager) runIteration(results chan<- *extraction.Result) {
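The Stop change above replaces a non-blocking send (which could be dropped if the run loop was mid-iteration) with a blocking one. A self-contained sketch of the resulting run/stop handshake, with manager as a simplified stand-in for ruleManager:

package main

import (
	"fmt"
	"time"
)

// manager sketches the handshake: Run loops on a ticker until a value
// arrives on done, and Stop blocks until Run has received it, so a
// stop request can no longer be silently lost.
type manager struct{ done chan bool }

func (m *manager) Run() {
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			// One rule evaluation iteration would run here.
		case <-m.done:
			fmt.Println("Rule manager stopped.")
			return
		}
	}
}

func (m *manager) Stop() { m.done <- true }

func main() {
	m := &manager{done: make(chan bool)}
	go m.Run()
	m.Stop()                          // blocks until Run receives it
	time.Sleep(20 * time.Millisecond) // give Run time to print and return
}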

View file

@ -21,7 +21,7 @@ import (
"github.com/prometheus/prometheus/rules/ast"
"github.com/prometheus/prometheus/stats"
"github.com/prometheus/prometheus/storage/metric"
"github.com/prometheus/prometheus/storage/local"
)
// A RecordingRule records its vector expression into new timeseries.
@ -34,11 +34,11 @@ type RecordingRule struct {
func (rule RecordingRule) Name() string { return rule.name }
func (rule RecordingRule) EvalRaw(timestamp clientmodel.Timestamp, storage metric.PreloadingPersistence) (ast.Vector, error) {
func (rule RecordingRule) EvalRaw(timestamp clientmodel.Timestamp, storage local.Storage) (ast.Vector, error) {
return ast.EvalVectorInstant(rule.vector, timestamp, storage, stats.NewTimerGroup())
}
func (rule RecordingRule) Eval(timestamp clientmodel.Timestamp, storage metric.PreloadingPersistence) (ast.Vector, error) {
func (rule RecordingRule) Eval(timestamp clientmodel.Timestamp, storage local.Storage) (ast.Vector, error) {
// Get the raw value of the rule expression.
vector, err := rule.EvalRaw(timestamp, storage)
if err != nil {

View file

@ -19,7 +19,7 @@ import (
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/rules/ast"
"github.com/prometheus/prometheus/storage/metric"
"github.com/prometheus/prometheus/storage/local"
)
// A Rule encapsulates a vector expression which is evaluated at a specified
@ -29,9 +29,9 @@ type Rule interface {
Name() string
// EvalRaw evaluates the rule's vector expression without triggering any
// other actions, like recording or alerting.
EvalRaw(timestamp clientmodel.Timestamp, storage metric.PreloadingPersistence) (ast.Vector, error)
EvalRaw(timestamp clientmodel.Timestamp, storage local.Storage) (ast.Vector, error)
// Eval evaluates the rule, including any associated recording or alerting actions.
Eval(timestamp clientmodel.Timestamp, storage metric.PreloadingPersistence) (ast.Vector, error)
Eval(timestamp clientmodel.Timestamp, storage local.Storage) (ast.Vector, error)
// ToDotGraph returns a Graphviz dot graph of the rule.
ToDotGraph() string
// String returns a human-readable string representation of the rule.

View file

@ -24,7 +24,7 @@ import (
"github.com/prometheus/prometheus/rules/ast"
"github.com/prometheus/prometheus/stats"
"github.com/prometheus/prometheus/storage/metric/tiered"
"github.com/prometheus/prometheus/storage/local"
"github.com/prometheus/prometheus/utility/test"
)
@ -52,23 +52,10 @@ func vectorComparisonString(expected []string, actual []string) string {
separator)
}
type testTieredStorageCloser struct {
storage *tiered.TieredStorage
directory test.Closer
}
func (t testTieredStorageCloser) Close() {
t.storage.Close()
t.directory.Close()
}
func newTestStorage(t testing.TB) (storage *tiered.TieredStorage, closer test.Closer) {
storage, closer = tiered.NewTestTieredStorage(t)
if storage == nil {
t.Fatal("storage == nil")
}
func newTestStorage(t testing.TB) (storage local.Storage, closer test.Closer) {
storage, closer = local.NewTestStorage(t)
storeMatrix(storage, testMatrix)
return
return storage, closer
}
func TestExpressions(t *testing.T) {
@ -83,30 +70,30 @@ func TestExpressions(t *testing.T) {
}{
{
expr: `SUM(http_requests)`,
output: []string{`http_requests => 3600 @[%v]`},
output: []string{`{} => 3600 @[%v]`},
fullRanges: 0,
intervalRanges: 8,
}, {
expr: `SUM(http_requests{instance="0"}) BY(job)`,
output: []string{
`http_requests{job="api-server"} => 400 @[%v]`,
`http_requests{job="app-server"} => 1200 @[%v]`,
`{job="api-server"} => 400 @[%v]`,
`{job="app-server"} => 1200 @[%v]`,
},
fullRanges: 0,
intervalRanges: 4,
}, {
expr: `SUM(http_requests{instance="0"}) BY(job) KEEPING_EXTRA`,
output: []string{
`http_requests{instance="0", job="api-server"} => 400 @[%v]`,
`http_requests{instance="0", job="app-server"} => 1200 @[%v]`,
`{instance="0", job="api-server"} => 400 @[%v]`,
`{instance="0", job="app-server"} => 1200 @[%v]`,
},
fullRanges: 0,
intervalRanges: 4,
}, {
expr: `SUM(http_requests) BY (job)`,
output: []string{
`http_requests{job="api-server"} => 1000 @[%v]`,
`http_requests{job="app-server"} => 2600 @[%v]`,
`{job="api-server"} => 1000 @[%v]`,
`{job="app-server"} => 2600 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
@ -114,8 +101,8 @@ func TestExpressions(t *testing.T) {
// Non-existent labels mentioned in BY-clauses shouldn't propagate to output.
expr: `SUM(http_requests) BY (job, nonexistent)`,
output: []string{
`http_requests{job="api-server"} => 1000 @[%v]`,
`http_requests{job="app-server"} => 2600 @[%v]`,
`{job="api-server"} => 1000 @[%v]`,
`{job="app-server"} => 2600 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
@ -125,141 +112,141 @@ func TestExpressions(t *testing.T) {
SUM(http_requests) BY /* comments shouldn't
have any effect */ (job) // another comment`,
output: []string{
`http_requests{job="api-server"} => 1000 @[%v]`,
`http_requests{job="app-server"} => 2600 @[%v]`,
`{job="api-server"} => 1000 @[%v]`,
`{job="app-server"} => 2600 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
}, {
expr: `COUNT(http_requests) BY (job)`,
output: []string{
`http_requests{job="api-server"} => 4 @[%v]`,
`http_requests{job="app-server"} => 4 @[%v]`,
`{job="api-server"} => 4 @[%v]`,
`{job="app-server"} => 4 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
}, {
expr: `SUM(http_requests) BY (job, group)`,
output: []string{
`http_requests{group="canary", job="api-server"} => 700 @[%v]`,
`http_requests{group="canary", job="app-server"} => 1500 @[%v]`,
`http_requests{group="production", job="api-server"} => 300 @[%v]`,
`http_requests{group="production", job="app-server"} => 1100 @[%v]`,
`{group="canary", job="api-server"} => 700 @[%v]`,
`{group="canary", job="app-server"} => 1500 @[%v]`,
`{group="production", job="api-server"} => 300 @[%v]`,
`{group="production", job="app-server"} => 1100 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
}, {
expr: `AVG(http_requests) BY (job)`,
output: []string{
`http_requests{job="api-server"} => 250 @[%v]`,
`http_requests{job="app-server"} => 650 @[%v]`,
`{job="api-server"} => 250 @[%v]`,
`{job="app-server"} => 650 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
}, {
expr: `MIN(http_requests) BY (job)`,
output: []string{
`http_requests{job="api-server"} => 100 @[%v]`,
`http_requests{job="app-server"} => 500 @[%v]`,
`{job="api-server"} => 100 @[%v]`,
`{job="app-server"} => 500 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
}, {
expr: `MAX(http_requests) BY (job)`,
output: []string{
`http_requests{job="api-server"} => 400 @[%v]`,
`http_requests{job="app-server"} => 800 @[%v]`,
`{job="api-server"} => 400 @[%v]`,
`{job="app-server"} => 800 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
}, {
expr: `SUM(http_requests) BY (job) - COUNT(http_requests) BY (job)`,
output: []string{
`http_requests{job="api-server"} => 996 @[%v]`,
`http_requests{job="app-server"} => 2596 @[%v]`,
`{job="api-server"} => 996 @[%v]`,
`{job="app-server"} => 2596 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
}, {
expr: `2 - SUM(http_requests) BY (job)`,
output: []string{
`http_requests{job="api-server"} => -998 @[%v]`,
`http_requests{job="app-server"} => -2598 @[%v]`,
`{job="api-server"} => -998 @[%v]`,
`{job="app-server"} => -2598 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
}, {
expr: `1000 / SUM(http_requests) BY (job)`,
output: []string{
`http_requests{job="api-server"} => 1 @[%v]`,
`http_requests{job="app-server"} => 0.38461538461538464 @[%v]`,
`{job="api-server"} => 1 @[%v]`,
`{job="app-server"} => 0.38461538461538464 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
}, {
expr: `SUM(http_requests) BY (job) - 2`,
output: []string{
`http_requests{job="api-server"} => 998 @[%v]`,
`http_requests{job="app-server"} => 2598 @[%v]`,
`{job="api-server"} => 998 @[%v]`,
`{job="app-server"} => 2598 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
}, {
expr: `SUM(http_requests) BY (job) % 3`,
output: []string{
`http_requests{job="api-server"} => 1 @[%v]`,
`http_requests{job="app-server"} => 2 @[%v]`,
`{job="api-server"} => 1 @[%v]`,
`{job="app-server"} => 2 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
}, {
expr: `SUM(http_requests) BY (job) / 0`,
output: []string{
`http_requests{job="api-server"} => +Inf @[%v]`,
`http_requests{job="app-server"} => +Inf @[%v]`,
`{job="api-server"} => +Inf @[%v]`,
`{job="app-server"} => +Inf @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
}, {
expr: `SUM(http_requests) BY (job) > 1000`,
output: []string{
`http_requests{job="app-server"} => 2600 @[%v]`,
`{job="app-server"} => 2600 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
}, {
expr: `1000 < SUM(http_requests) BY (job)`,
output: []string{
`http_requests{job="app-server"} => 1000 @[%v]`,
`{job="app-server"} => 1000 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
}, {
expr: `SUM(http_requests) BY (job) <= 1000`,
output: []string{
`http_requests{job="api-server"} => 1000 @[%v]`,
`{job="api-server"} => 1000 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
}, {
expr: `SUM(http_requests) BY (job) != 1000`,
output: []string{
`http_requests{job="app-server"} => 2600 @[%v]`,
`{job="app-server"} => 2600 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
}, {
expr: `SUM(http_requests) BY (job) == 1000`,
output: []string{
`http_requests{job="api-server"} => 1000 @[%v]`,
`{job="api-server"} => 1000 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
}, {
expr: `SUM(http_requests) BY (job) + SUM(http_requests) BY (job)`,
output: []string{
`http_requests{job="api-server"} => 2000 @[%v]`,
`http_requests{job="app-server"} => 5200 @[%v]`,
`{job="api-server"} => 2000 @[%v]`,
`{job="app-server"} => 5200 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
@ -274,22 +261,22 @@ func TestExpressions(t *testing.T) {
}, {
expr: `http_requests{job="api-server", group="canary"} + delta(http_requests{job="api-server"}[5m], 1)`,
output: []string{
`http_requests{group="canary", instance="0", job="api-server"} => 330 @[%v]`,
`http_requests{group="canary", instance="1", job="api-server"} => 440 @[%v]`,
`{group="canary", instance="0", job="api-server"} => 330 @[%v]`,
`{group="canary", instance="1", job="api-server"} => 440 @[%v]`,
},
fullRanges: 4,
intervalRanges: 0,
}, {
expr: `delta(http_requests[25m], 1)`,
output: []string{
`http_requests{group="canary", instance="0", job="api-server"} => 150 @[%v]`,
`http_requests{group="canary", instance="0", job="app-server"} => 350 @[%v]`,
`http_requests{group="canary", instance="1", job="api-server"} => 200 @[%v]`,
`http_requests{group="canary", instance="1", job="app-server"} => 400 @[%v]`,
`http_requests{group="production", instance="0", job="api-server"} => 50 @[%v]`,
`http_requests{group="production", instance="0", job="app-server"} => 250 @[%v]`,
`http_requests{group="production", instance="1", job="api-server"} => 100 @[%v]`,
`http_requests{group="production", instance="1", job="app-server"} => 300 @[%v]`,
`{group="canary", instance="0", job="api-server"} => 150 @[%v]`,
`{group="canary", instance="0", job="app-server"} => 350 @[%v]`,
`{group="canary", instance="1", job="api-server"} => 200 @[%v]`,
`{group="canary", instance="1", job="app-server"} => 400 @[%v]`,
`{group="production", instance="0", job="api-server"} => 50 @[%v]`,
`{group="production", instance="0", job="app-server"} => 250 @[%v]`,
`{group="production", instance="1", job="api-server"} => 100 @[%v]`,
`{group="production", instance="1", job="app-server"} => 300 @[%v]`,
},
fullRanges: 8,
intervalRanges: 0,
@ -373,45 +360,45 @@ func TestExpressions(t *testing.T) {
// Lower-cased aggregation operators should work too.
expr: `sum(http_requests) by (job) + min(http_requests) by (job) + max(http_requests) by (job) + avg(http_requests) by (job)`,
output: []string{
`http_requests{job="app-server"} => 4550 @[%v]`,
`http_requests{job="api-server"} => 1750 @[%v]`,
`{job="app-server"} => 4550 @[%v]`,
`{job="api-server"} => 1750 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
}, {
// Deltas should be adjusted for target interval vs. samples under target interval.
expr: `delta(http_requests{group="canary", instance="1", job="app-server"}[18m], 1)`,
output: []string{`http_requests{group="canary", instance="1", job="app-server"} => 288 @[%v]`},
output: []string{`{group="canary", instance="1", job="app-server"} => 288 @[%v]`},
fullRanges: 1,
intervalRanges: 0,
}, {
// Rates should transform per-interval deltas to per-second rates.
expr: `rate(http_requests{group="canary", instance="1", job="app-server"}[10m])`,
output: []string{`http_requests{group="canary", instance="1", job="app-server"} => 0.26666666666666666 @[%v]`},
output: []string{`{group="canary", instance="1", job="app-server"} => 0.26666666666666666 @[%v]`},
fullRanges: 1,
intervalRanges: 0,
}, {
// Counter resets in middle of range are ignored by delta() if counter == 1.
expr: `delta(testcounter_reset_middle[50m], 1)`,
output: []string{`testcounter_reset_middle => 90 @[%v]`},
output: []string{`{} => 90 @[%v]`},
fullRanges: 1,
intervalRanges: 0,
}, {
// Counter resets in middle of range are not ignored by delta() if counter == 0.
expr: `delta(testcounter_reset_middle[50m], 0)`,
output: []string{`testcounter_reset_middle => 50 @[%v]`},
output: []string{`{} => 50 @[%v]`},
fullRanges: 1,
intervalRanges: 0,
}, {
// Counter resets at end of range are ignored by delta() if counter == 1.
expr: `delta(testcounter_reset_end[5m], 1)`,
output: []string{`testcounter_reset_end => 0 @[%v]`},
output: []string{`{} => 0 @[%v]`},
fullRanges: 1,
intervalRanges: 0,
}, {
// Counter resets at end of range are not ignored by delta() if counter == 0.
expr: `delta(testcounter_reset_end[5m], 0)`,
output: []string{`testcounter_reset_end => -90 @[%v]`},
output: []string{`{} => -90 @[%v]`},
fullRanges: 1,
intervalRanges: 0,
}, {
@ -483,8 +470,8 @@ func TestExpressions(t *testing.T) {
{
expr: `abs(-1 * http_requests{group="production",job="api-server"})`,
output: []string{
`http_requests{group="production", instance="0", job="api-server"} => 100 @[%v]`,
`http_requests{group="production", instance="1", job="api-server"} => 200 @[%v]`,
`{group="production", instance="0", job="api-server"} => 100 @[%v]`,
`{group="production", instance="1", job="api-server"} => 200 @[%v]`,
},
fullRanges: 0,
intervalRanges: 2,
@ -492,8 +479,8 @@ func TestExpressions(t *testing.T) {
{
expr: `avg_over_time(http_requests{group="production",job="api-server"}[1h])`,
output: []string{
`http_requests{group="production", instance="0", job="api-server"} => 50 @[%v]`,
`http_requests{group="production", instance="1", job="api-server"} => 100 @[%v]`,
`{group="production", instance="0", job="api-server"} => 50 @[%v]`,
`{group="production", instance="1", job="api-server"} => 100 @[%v]`,
},
fullRanges: 2,
intervalRanges: 0,
@ -501,8 +488,8 @@ func TestExpressions(t *testing.T) {
{
expr: `count_over_time(http_requests{group="production",job="api-server"}[1h])`,
output: []string{
`http_requests{group="production", instance="0", job="api-server"} => 11 @[%v]`,
`http_requests{group="production", instance="1", job="api-server"} => 11 @[%v]`,
`{group="production", instance="0", job="api-server"} => 11 @[%v]`,
`{group="production", instance="1", job="api-server"} => 11 @[%v]`,
},
fullRanges: 2,
intervalRanges: 0,
@ -510,8 +497,8 @@ func TestExpressions(t *testing.T) {
{
expr: `max_over_time(http_requests{group="production",job="api-server"}[1h])`,
output: []string{
`http_requests{group="production", instance="0", job="api-server"} => 100 @[%v]`,
`http_requests{group="production", instance="1", job="api-server"} => 200 @[%v]`,
`{group="production", instance="0", job="api-server"} => 100 @[%v]`,
`{group="production", instance="1", job="api-server"} => 200 @[%v]`,
},
fullRanges: 2,
intervalRanges: 0,
@ -519,8 +506,8 @@ func TestExpressions(t *testing.T) {
{
expr: `min_over_time(http_requests{group="production",job="api-server"}[1h])`,
output: []string{
`http_requests{group="production", instance="0", job="api-server"} => 0 @[%v]`,
`http_requests{group="production", instance="1", job="api-server"} => 0 @[%v]`,
`{group="production", instance="0", job="api-server"} => 0 @[%v]`,
`{group="production", instance="1", job="api-server"} => 0 @[%v]`,
},
fullRanges: 2,
intervalRanges: 0,
@ -528,8 +515,8 @@ func TestExpressions(t *testing.T) {
{
expr: `sum_over_time(http_requests{group="production",job="api-server"}[1h])`,
output: []string{
`http_requests{group="production", instance="0", job="api-server"} => 550 @[%v]`,
`http_requests{group="production", instance="1", job="api-server"} => 1100 @[%v]`,
`{group="production", instance="0", job="api-server"} => 550 @[%v]`,
`{group="production", instance="1", job="api-server"} => 1100 @[%v]`,
},
fullRanges: 2,
intervalRanges: 0,
@ -582,8 +569,8 @@ func TestExpressions(t *testing.T) {
// Test alternative "by"-clause order.
expr: `sum by (group) (http_requests{job="api-server"})`,
output: []string{
`http_requests{group="canary"} => 700 @[%v]`,
`http_requests{group="production"} => 300 @[%v]`,
`{group="canary"} => 700 @[%v]`,
`{group="production"} => 300 @[%v]`,
},
fullRanges: 0,
intervalRanges: 4,
@ -592,8 +579,8 @@ func TestExpressions(t *testing.T) {
// Test alternative "by"-clause order with "keeping_extra".
expr: `sum by (group) keeping_extra (http_requests{job="api-server"})`,
output: []string{
`http_requests{group="canary", job="api-server"} => 700 @[%v]`,
`http_requests{group="production", job="api-server"} => 300 @[%v]`,
`{group="canary", job="api-server"} => 700 @[%v]`,
`{group="production", job="api-server"} => 300 @[%v]`,
},
fullRanges: 0,
intervalRanges: 4,
@ -604,16 +591,55 @@ func TestExpressions(t *testing.T) {
// in an organization), or risk serious user confusion.
expr: `sum(sum by (group) keeping_extra (http_requests{job="api-server"})) by (job)`,
output: []string{
`http_requests{job="api-server"} => 1000 @[%v]`,
`{job="api-server"} => 1000 @[%v]`,
},
fullRanges: 0,
intervalRanges: 4,
},
{
expr: `absent(nonexistent)`,
output: []string{
`{} => 1 @[%v]`,
},
fullRanges: 0,
intervalRanges: 0,
},
{
expr: `absent(nonexistent{job="testjob", instance="testinstance", method=~".*"})`,
output: []string{
`{instance="testinstance", job="testjob"} => 1 @[%v]`,
},
fullRanges: 0,
intervalRanges: 0,
},
{
expr: `count_scalar(absent(http_requests))`,
output: []string{
`scalar: 0 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
},
{
expr: `count_scalar(absent(sum(http_requests)))`,
output: []string{
`scalar: 0 @[%v]`,
},
fullRanges: 0,
intervalRanges: 8,
},
{
expr: `absent(sum(nonexistent{job="testjob", instance="testinstance"}))`,
output: []string{
`{} => 1 @[%v]`,
},
fullRanges: 0,
intervalRanges: 0,
},
}
tieredStorage, closer := newTestStorage(t)
storage, closer := newTestStorage(t)
defer closer.Close()
tieredStorage.Flush()
for i, exprTest := range expressionTests {
expectedLines := annotateWithTime(exprTest.output, testEvalTime)
@ -631,7 +657,7 @@ func TestExpressions(t *testing.T) {
t.Errorf("%d. Test should fail, but didn't", i)
}
failed := false
resultStr := ast.EvalToString(testExpr, testEvalTime, ast.TEXT, tieredStorage, stats.NewTimerGroup())
resultStr := ast.EvalToString(testExpr, testEvalTime, ast.TEXT, storage, stats.NewTimerGroup())
resultLines := strings.Split(resultStr, "\n")
if len(exprTest.output) != len(resultLines) {
@ -661,8 +687,8 @@ func TestExpressions(t *testing.T) {
}
}
analyzer := ast.NewQueryAnalyzer(tieredStorage)
analyzer.AnalyzeQueries(testExpr)
analyzer := ast.NewQueryAnalyzer(storage)
ast.Walk(analyzer, testExpr)
if exprTest.fullRanges != len(analyzer.FullRanges) {
t.Errorf("%d. Count of full ranges didn't match: %v vs %v", i, exprTest.fullRanges, len(analyzer.FullRanges))
failed = true
@ -771,9 +797,8 @@ func TestAlertingRule(t *testing.T) {
},
}
tieredStorage, closer := newTestStorage(t)
storage, closer := newTestStorage(t)
defer closer.Close()
tieredStorage.Flush()
alertExpr, err := LoadExprFromString(`http_requests{group="canary", job="app-server"} < 100`)
if err != nil {
@ -787,7 +812,7 @@ func TestAlertingRule(t *testing.T) {
for i, expected := range evalOutputs {
evalTime := testStartTime.Add(testSampleInterval * time.Duration(i))
actual, err := rule.Eval(evalTime, tieredStorage)
actual, err := rule.Eval(evalTime, storage)
if err != nil {
t.Fatalf("Error during alerting rule evaluation: %s", err)
}

View file

@ -22,8 +22,8 @@ const (
TotalEvalTime QueryTiming = iota
ResultSortTime
JsonEncodeTime
TotalViewBuildingTime
ViewRequestBuildTime
PreloadTime
TotalQueryPreparationTime
InnerViewBuildingTime
InnerEvalTime
ResultAppendTime
@ -46,10 +46,10 @@ func (s QueryTiming) String() string {
return "Result sorting time"
case JsonEncodeTime:
return "JSON encoding time"
case TotalViewBuildingTime:
return "Total view building time"
case ViewRequestBuildTime:
return "View request building time"
case PreloadTime:
return "Query preloading time"
case TotalQueryPreparationTime:
return "Total query preparation time"
case InnerViewBuildingTime:
return "Inner view building time"
case InnerEvalTime:
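The renamed timings are used in the query code above as queryStats.GetTimer(...).Start()/.Stop() pairs. A minimal sketch of such a timer group (a simplified stand-in, not the real stats package):

package main

import (
	"fmt"
	"time"
)

// timer and timerGroup mimic the usage seen above:
// queryStats.GetTimer(stats.PreloadTime).Start() ... .Stop().
type timer struct {
	name  string
	start time.Time
	total time.Duration
}

func (t *timer) Start() *timer { t.start = time.Now(); return t }
func (t *timer) Stop()         { t.total += time.Since(t.start) }

type timerGroup struct{ timers map[string]*timer }

func newTimerGroup() *timerGroup { return &timerGroup{timers: map[string]*timer{}} }

// GetTimer returns the timer for name, creating it on first use so the
// same phase can be timed across several code paths.
func (g *timerGroup) GetTimer(name string) *timer {
	if t, ok := g.timers[name]; ok {
		return t
	}
	t := &timer{name: name}
	g.timers[name] = t
	return t
}

func main() {
	g := newTimerGroup()
	pt := g.GetTimer("Query preloading time").Start()
	time.Sleep(5 * time.Millisecond) // stands in for the preloading work
	pt.Stop()
	fmt.Println(pt.name+":", pt.total)
}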

View file

@ -1,76 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
// RecordDecoder decodes each key-value pair in the database. The protocol
// around it makes the assumption that the underlying implementation is
// concurrency safe.
type RecordDecoder interface {
DecodeKey(in interface{}) (out interface{}, err error)
DecodeValue(in interface{}) (out interface{}, err error)
}
// FilterResult describes the record matching and scanning behavior for the
// database.
type FilterResult int
const (
// Stop scanning the database.
Stop FilterResult = iota
// Skip this record but continue scanning.
Skip
// Accept this record for the Operator.
Accept
)
func (f FilterResult) String() string {
switch f {
case Stop:
return "STOP"
case Skip:
return "SKIP"
case Accept:
return "ACCEPT"
}
panic("unknown")
}
// OperatorError is used for storage operations upon errors that may or may not
// be continuable.
type OperatorError struct {
Error error
Continuable bool
}
// RecordFilter is responsible for controlling the behavior of the database scan
// process and determines the disposition of various records.
//
// The protocol around it makes the assumption that the underlying
// implementation is concurrency safe.
type RecordFilter interface {
// Filter receives the key and value as decoded from the RecordDecoder type.
Filter(key, value interface{}) (filterResult FilterResult)
}
// RecordOperator is responsible for taking action upon each entity that is
// passed to it.
//
// The protocol around it makes the assumption that the underlying
// implementation is concurrency safe.
type RecordOperator interface {
// Take action on a given record. If the action returns an error, the entire
// scan process stops.
Operate(key, value interface{}) (err *OperatorError)
}

storage/local/chunk.go Normal file
View file

@ -0,0 +1,233 @@
// Copyright 2014 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package local
import (
"container/list"
"io"
"sync"
"sync/atomic"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
)
// chunkDesc contains meta-data for a chunk. Many of its methods are
// goroutine-safe proxies for chunk methods.
type chunkDesc struct {
sync.Mutex
chunk chunk // nil if chunk is evicted.
refCount int
chunkFirstTime clientmodel.Timestamp // Used if chunk is evicted.
chunkLastTime clientmodel.Timestamp // Used if chunk is evicted.
// evictListElement is nil if the chunk is not in the evict list.
// evictListElement is _not_ protected by the chunkDesc mutex.
// It must only be touched by the evict list handler in memorySeriesStorage.
evictListElement *list.Element
}
// newChunkDesc creates a new chunkDesc pointing to the provided chunk. The
// provided chunk is assumed to be not yet persisted. Therefore, the refCount of
// the new chunkDesc is 1 (preventing eviction prior to persisting).
func newChunkDesc(c chunk) *chunkDesc {
chunkOps.WithLabelValues(createAndPin).Inc()
atomic.AddInt64(&numMemChunks, 1)
// TODO: numMemChunkDescs is actually never read except during metrics
// collection. Turn it into a real metric.
atomic.AddInt64(&numMemChunkDescs, 1)
return &chunkDesc{chunk: c, refCount: 1}
}
func (cd *chunkDesc) add(s *metric.SamplePair) []chunk {
cd.Lock()
defer cd.Unlock()
return cd.chunk.add(s)
}
// pin increments the refCount by one. Upon increment from 0 to 1, this
// chunkDesc is removed from the evict list. To enable the latter, the
// evictRequests channel has to be provided.
func (cd *chunkDesc) pin(evictRequests chan<- evictRequest) {
cd.Lock()
defer cd.Unlock()
if cd.refCount == 0 {
// Remove ourselves from the evict list.
evictRequests <- evictRequest{cd, false}
}
cd.refCount++
}
// unpin decrements the refCount by one. Upon decrement from 1 to 0, this
// chunkDesc is added to the evict list. To enable the latter, the evictRequests
// channel has to be provided.
func (cd *chunkDesc) unpin(evictRequests chan<- evictRequest) {
cd.Lock()
defer cd.Unlock()
if cd.refCount == 0 {
panic("cannot unpin already unpinned chunk")
}
cd.refCount--
if cd.refCount == 0 {
// Add ourselves to the back of the evict list.
evictRequests <- evictRequest{cd, true}
}
}
func (cd *chunkDesc) getRefCount() int {
cd.Lock()
defer cd.Unlock()
return cd.refCount
}
func (cd *chunkDesc) firstTime() clientmodel.Timestamp {
cd.Lock()
defer cd.Unlock()
if cd.chunk == nil {
return cd.chunkFirstTime
}
return cd.chunk.firstTime()
}
func (cd *chunkDesc) lastTime() clientmodel.Timestamp {
cd.Lock()
defer cd.Unlock()
if cd.chunk == nil {
return cd.chunkLastTime
}
return cd.chunk.lastTime()
}
func (cd *chunkDesc) isEvicted() bool {
cd.Lock()
defer cd.Unlock()
return cd.chunk == nil
}
func (cd *chunkDesc) contains(t clientmodel.Timestamp) bool {
return !t.Before(cd.firstTime()) && !t.After(cd.lastTime())
}
func (cd *chunkDesc) setChunk(c chunk) {
cd.Lock()
defer cd.Unlock()
if cd.chunk != nil {
panic("chunk already set")
}
cd.chunk = c
}
// maybeEvict evicts the chunk if the refCount is 0. It returns whether the chunk
// is now evicted, which includes the case that the chunk was evicted even
// before this method was called.
func (cd *chunkDesc) maybeEvict() bool {
cd.Lock()
defer cd.Unlock()
if cd.chunk == nil {
return true
}
if cd.refCount != 0 {
return false
}
cd.chunkFirstTime = cd.chunk.firstTime()
cd.chunkLastTime = cd.chunk.lastTime()
cd.chunk = nil
chunkOps.WithLabelValues(evict).Inc()
atomic.AddInt64(&numMemChunks, -1)
return true
}
// chunk is the interface for all chunks. Chunks are generally not
// goroutine-safe.
type chunk interface {
// add adds a SamplePair to the chunk, performs any necessary
// re-encoding, and adds any necessary overflow chunks. It returns the
// new version of the original chunk, followed by overflow chunks, if
// any. The first chunk returned might be the same as the original one
// or a newly allocated version. In any case, take the returned chunk as
// the relevant one and discard the original chunk.
add(*metric.SamplePair) []chunk
clone() chunk
firstTime() clientmodel.Timestamp
lastTime() clientmodel.Timestamp
newIterator() chunkIterator
marshal(io.Writer) error
unmarshal(io.Reader) error
// values returns a channel, from which all sample values in the chunk
// can be received in order. The channel is closed after the last
// one. It is generally not safe to mutate the chunk while the channel
// is still open.
values() <-chan *metric.SamplePair
}
// A chunkIterator enables efficient access to the content of a chunk. It is
// generally not safe to use a chunkIterator concurrently with or after chunk
// mutation.
type chunkIterator interface {
// Gets the two values that are immediately adjacent to a given time. If
// a value exists at precisely the given time, only that single value is
// returned. If the given time is before the first or after the last
// value, only the first or last value, respectively, is returned (as a
// single value).
getValueAtTime(clientmodel.Timestamp) metric.Values
// Gets all values contained within a given interval.
getRangeValues(metric.Interval) metric.Values
// Whether a given timestamp is contained between first and last value
// in the chunk.
contains(clientmodel.Timestamp) bool
}
func transcodeAndAdd(dst chunk, src chunk, s *metric.SamplePair) []chunk {
chunkOps.WithLabelValues(transcode).Inc()
head := dst
body := []chunk{}
for v := range src.values() {
newChunks := head.add(v)
body = append(body, newChunks[:len(newChunks)-1]...)
head = newChunks[len(newChunks)-1]
}
newChunks := head.add(s)
body = append(body, newChunks[:len(newChunks)-1]...)
head = newChunks[len(newChunks)-1]
return append(body, head)
}
func chunkType(c chunk) byte {
switch c.(type) {
case *deltaEncodedChunk:
return 0
default:
panic("unknown chunk type")
}
}
func chunkForType(chunkType byte) chunk {
switch chunkType {
case 0:
return newDeltaEncodedChunk(d1, d0, true)
default:
panic("unknown chunk type")
}
}
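The add contract documented above (return the new head chunk plus any overflow chunks) and the channel-based values iteration can be seen in miniature below; toyChunk is a hypothetical fixed-capacity chunk, not the real delta encoding:

package main

import "fmt"

type sample struct {
	t int64
	v float64
}

// toyChunk illustrates the add contract: add returns the new version
// of the chunk followed by any overflow chunks.
type toyChunk struct {
	samples  []sample
	capacity int
}

func newToyChunk(capacity int) *toyChunk {
	return &toyChunk{samples: make([]sample, 0, capacity), capacity: capacity}
}

func (c *toyChunk) add(s sample) []*toyChunk {
	if len(c.samples) < c.capacity {
		c.samples = append(c.samples, s)
		return []*toyChunk{c}
	}
	overflow := newToyChunk(c.capacity)
	overflow.samples = append(overflow.samples, s)
	return []*toyChunk{c, overflow}
}

// values mirrors the channel-based iteration in the chunk interface:
// all samples are sent in order, then the channel is closed.
func (c *toyChunk) values() <-chan sample {
	ch := make(chan sample)
	go func() {
		defer close(ch)
		for _, s := range c.samples {
			ch <- s
		}
	}()
	return ch
}

func main() {
	// The caller-side pattern is the same as in transcodeAndAdd: keep
	// the last returned chunk as the new head, collect the others.
	head := newToyChunk(2)
	var full []*toyChunk
	for i := int64(0); i < 5; i++ {
		newChunks := head.add(sample{t: i, v: float64(i)})
		full = append(full, newChunks[:len(newChunks)-1]...)
		head = newChunks[len(newChunks)-1]
	}
	full = append(full, head)
	for _, c := range full {
		for s := range c.values() {
			fmt.Printf("(%d,%g) ", s.t, s.v)
		}
	}
	fmt.Println() // (0,0) (1,1) (2,2) (3,3) (4,4)
}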

View file

@ -0,0 +1,436 @@
// Copyright 2014 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package codable provides types that implement encoding.BinaryMarshaler and
// encoding.BinaryUnmarshaler and functions that help to encode and decode
// primitives. The Prometheus storage backend uses them to persist objects to
// files and to save objects in LevelDB.
//
// The encodings used in this package are designed in a way that objects can be
// unmarshaled from a continuous byte stream, i.e. the format itself determines
// when to stop reading. No separate termination information is
// needed.
//
// Strings are encoded as the length of their bytes as a varint followed by
// their bytes.
//
// Slices are encoded as their length as a varint followed by their elements.
//
// Maps are encoded as the number of mappings as a varint, followed by the
// mappings, each of which consists of the key followed by the value.
package codable
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"sync"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
)
// A byteReader is an io.ByteReader that also implements the vanilla io.Reader
// interface.
type byteReader interface {
io.Reader
io.ByteReader
}
// bufPool is a pool for staging buffers. Using a pool allows concurrency-safe
// reuse of buffers.
var bufPool sync.Pool
// getBuf returns a buffer from the pool. The length of the returned slice is l.
func getBuf(l int) []byte {
x := bufPool.Get()
if x == nil {
return make([]byte, l)
}
buf := x.([]byte)
if cap(buf) < l {
return make([]byte, l)
}
return buf[:l]
}
// putBuf returns a buffer to the pool.
func putBuf(buf []byte) {
bufPool.Put(buf)
}
// EncodeVarint encodes an int64 as a varint and writes it to an io.Writer.
// It returns the number of bytes written.
// This is a GC-friendly implementation that takes the required staging buffer
// from a buffer pool.
func EncodeVarint(w io.Writer, i int64) (int, error) {
buf := getBuf(binary.MaxVarintLen64)
defer putBuf(buf)
bytesWritten := binary.PutVarint(buf, i)
_, err := w.Write(buf[:bytesWritten])
return bytesWritten, err
}
// EncodeUint64 writes a uint64 to an io.Writer in big-endian byte order.
// This is a GC-friendly implementation that takes the required staging buffer
// from a buffer pool.
func EncodeUint64(w io.Writer, u uint64) error {
buf := getBuf(8)
defer putBuf(buf)
binary.BigEndian.PutUint64(buf, u)
_, err := w.Write(buf)
return err
}
// DecodeUint64 reads a uint64 from an io.Reader in big-endian byte order.
// This is a GC-friendly implementation that takes the required staging buffer
// from a buffer pool.
func DecodeUint64(r io.Reader) (uint64, error) {
buf := getBuf(8)
defer putBuf(buf)
if _, err := io.ReadFull(r, buf); err != nil {
return 0, err
}
return binary.BigEndian.Uint64(buf), nil
}
// encodeString writes the varint encoded length followed by the bytes of s to
// b.
func encodeString(b *bytes.Buffer, s string) error {
if _, err := EncodeVarint(b, int64(len(s))); err != nil {
return err
}
if _, err := b.WriteString(s); err != nil {
return err
}
return nil
}
// decodeString decodes a string encoded by encodeString.
func decodeString(b byteReader) (string, error) {
length, err := binary.ReadVarint(b)
if err != nil {
return "", err
}
buf := getBuf(int(length))
defer putBuf(buf)
if _, err := io.ReadFull(b, buf); err != nil {
return "", err
}
return string(buf), nil
}
// A Metric is a clientmodel.Metric that implements
// encoding.BinaryMarshaler and encoding.BinaryUnmarshaler.
type Metric clientmodel.Metric
// MarshalBinary implements encoding.BinaryMarshaler.
func (m Metric) MarshalBinary() ([]byte, error) {
buf := &bytes.Buffer{}
if _, err := EncodeVarint(buf, int64(len(m))); err != nil {
return nil, err
}
for l, v := range m {
if err := encodeString(buf, string(l)); err != nil {
return nil, err
}
if err := encodeString(buf, string(v)); err != nil {
return nil, err
}
}
return buf.Bytes(), nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler. It can be used with the
// zero value of Metric.
func (m *Metric) UnmarshalBinary(buf []byte) error {
return m.UnmarshalFromReader(bytes.NewReader(buf))
}
// UnmarshalFromReader unmarshals a Metric from a reader that implements
// both io.Reader and io.ByteReader. It can be used with the zero value of
// Metric.
func (m *Metric) UnmarshalFromReader(r byteReader) error {
numLabelPairs, err := binary.ReadVarint(r)
if err != nil {
return err
}
*m = make(Metric, numLabelPairs)
for ; numLabelPairs > 0; numLabelPairs-- {
ln, err := decodeString(r)
if err != nil {
return err
}
lv, err := decodeString(r)
if err != nil {
return err
}
(*m)[clientmodel.LabelName(ln)] = clientmodel.LabelValue(lv)
}
return nil
}
// A Fingerprint is a clientmodel.Fingerprint that implements
// encoding.BinaryMarshaler and encoding.BinaryUnmarshaler. The implementation
// depends on clientmodel.Fingerprint being convertible to uint64. It encodes
// the fingerprint as a big-endian uint64.
type Fingerprint clientmodel.Fingerprint
// MarshalBinary implements encoding.BinaryMarshaler.
func (fp Fingerprint) MarshalBinary() ([]byte, error) {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(fp))
return b, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (fp *Fingerprint) UnmarshalBinary(buf []byte) error {
*fp = Fingerprint(binary.BigEndian.Uint64(buf))
return nil
}
// FingerprintSet is a map[clientmodel.Fingerprint]struct{} that
// implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler. Its
// binary form is identical to that of Fingerprints.
type FingerprintSet map[clientmodel.Fingerprint]struct{}
// MarshalBinary implements encoding.BinaryMarshaler.
func (fps FingerprintSet) MarshalBinary() ([]byte, error) {
b := make([]byte, binary.MaxVarintLen64+len(fps)*8)
lenBytes := binary.PutVarint(b, int64(len(fps)))
offset := lenBytes
for fp := range fps {
binary.BigEndian.PutUint64(b[offset:], uint64(fp))
offset += 8
}
return b[:len(fps)*8+lenBytes], nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (fps *FingerprintSet) UnmarshalBinary(buf []byte) error {
numFPs, offset := binary.Varint(buf)
if offset <= 0 {
return fmt.Errorf("could not decode length of Fingerprints, varint decoding returned %d", offset)
}
*fps = make(FingerprintSet, numFPs)
for i := 0; i < int(numFPs); i++ {
(*fps)[clientmodel.Fingerprint(binary.BigEndian.Uint64(buf[offset+i*8:]))] = struct{}{}
}
return nil
}
// Fingerprints is a clientmodel.Fingerprints that implements
// encoding.BinaryMarshaler and encoding.BinaryUnmarshaler. Its binary form is
// identical to that of FingerprintSet.
type Fingerprints clientmodel.Fingerprints
// MarshalBinary implements encoding.BinaryMarshaler.
func (fps Fingerprints) MarshalBinary() ([]byte, error) {
b := make([]byte, binary.MaxVarintLen64+len(fps)*8)
lenBytes := binary.PutVarint(b, int64(len(fps)))
for i, fp := range fps {
binary.BigEndian.PutUint64(b[i*8+lenBytes:], uint64(fp))
}
return b[:len(fps)*8+lenBytes], nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (fps *Fingerprints) UnmarshalBinary(buf []byte) error {
numFPs, offset := binary.Varint(buf)
if offset <= 0 {
return fmt.Errorf("could not decode length of Fingerprints, varint decoding returned %d", offset)
}
*fps = make(Fingerprints, numFPs)
for i := range *fps {
(*fps)[i] = clientmodel.Fingerprint(binary.BigEndian.Uint64(buf[offset+i*8:]))
}
return nil
}
// LabelPair is a metric.LabelPair that implements
// encoding.BinaryMarshaler and encoding.BinaryUnmarshaler.
type LabelPair metric.LabelPair
// MarshalBinary implements encoding.BinaryMarshaler.
func (lp LabelPair) MarshalBinary() ([]byte, error) {
buf := &bytes.Buffer{}
if err := encodeString(buf, string(lp.Name)); err != nil {
return nil, err
}
if err := encodeString(buf, string(lp.Value)); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (lp *LabelPair) UnmarshalBinary(buf []byte) error {
r := bytes.NewReader(buf)
n, err := decodeString(r)
if err != nil {
return err
}
v, err := decodeString(r)
if err != nil {
return err
}
lp.Name = clientmodel.LabelName(n)
lp.Value = clientmodel.LabelValue(v)
return nil
}
// LabelName is a clientmodel.LabelName that implements
// encoding.BinaryMarshaler and encoding.BinaryUnmarshaler.
type LabelName clientmodel.LabelName
// MarshalBinary implements encoding.BinaryMarshaler.
func (l LabelName) MarshalBinary() ([]byte, error) {
buf := &bytes.Buffer{}
if err := encodeString(buf, string(l)); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (l *LabelName) UnmarshalBinary(buf []byte) error {
r := bytes.NewReader(buf)
n, err := decodeString(r)
if err != nil {
return err
}
*l = LabelName(n)
return nil
}
// LabelValueSet is a map[clientmodel.LabelValue]struct{} that implements
// encoding.BinaryMarshaler and encoding.BinaryUnmarshaler. Its binary form is
// identical to that of LabelValues.
type LabelValueSet map[clientmodel.LabelValue]struct{}
// MarshalBinary implements encoding.BinaryMarshaler.
func (vs LabelValueSet) MarshalBinary() ([]byte, error) {
buf := &bytes.Buffer{}
if _, err := EncodeVarint(buf, int64(len(vs))); err != nil {
return nil, err
}
for v := range vs {
if err := encodeString(buf, string(v)); err != nil {
return nil, err
}
}
return buf.Bytes(), nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (vs *LabelValueSet) UnmarshalBinary(buf []byte) error {
r := bytes.NewReader(buf)
numValues, err := binary.ReadVarint(r)
if err != nil {
return err
}
*vs = make(LabelValueSet, numValues)
for i := int64(0); i < numValues; i++ {
v, err := decodeString(r)
if err != nil {
return err
}
(*vs)[clientmodel.LabelValue(v)] = struct{}{}
}
return nil
}
// LabelValues is a clientmodel.LabelValues that implements
// encoding.BinaryMarshaler and encoding.BinaryUnmarshaler. Its binary form is
// identical to that of LabelValueSet.
type LabelValues clientmodel.LabelValues
// MarshalBinary implements encoding.BinaryMarshaler.
func (vs LabelValues) MarshalBinary() ([]byte, error) {
buf := &bytes.Buffer{}
if _, err := EncodeVarint(buf, int64(len(vs))); err != nil {
return nil, err
}
for _, v := range vs {
if err := encodeString(buf, string(v)); err != nil {
return nil, err
}
}
return buf.Bytes(), nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (vs *LabelValues) UnmarshalBinary(buf []byte) error {
r := bytes.NewReader(buf)
numValues, err := binary.ReadVarint(r)
if err != nil {
return err
}
*vs = make(LabelValues, numValues)
for i := range *vs {
v, err := decodeString(r)
if err != nil {
return err
}
(*vs)[i] = clientmodel.LabelValue(v)
}
return nil
}
// TimeRange is used to define a time range and implements
// encoding.BinaryMarshaler and encoding.BinaryUnmarshaler.
type TimeRange struct {
First, Last clientmodel.Timestamp
}
// MarshalBinary implements encoding.BinaryMarshaler.
func (tr TimeRange) MarshalBinary() ([]byte, error) {
buf := &bytes.Buffer{}
if _, err := EncodeVarint(buf, int64(tr.First)); err != nil {
return nil, err
}
if _, err := EncodeVarint(buf, int64(tr.Last)); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (tr *TimeRange) UnmarshalBinary(buf []byte) error {
r := bytes.NewReader(buf)
first, err := binary.ReadVarint(r)
if err != nil {
return err
}
last, err := binary.ReadVarint(r)
if err != nil {
return err
}
tr.First = clientmodel.Timestamp(first)
tr.Last = clientmodel.Timestamp(last)
return nil
}
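The length-prefixed string encoding described in the package comment round-trips as follows; a standalone sketch mirroring encodeString/decodeString with only encoding/binary:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// encodeString writes len(s) as a varint followed by the bytes of s,
// the scheme the codable package comment describes.
func encodeString(b *bytes.Buffer, s string) {
	var lenBuf [binary.MaxVarintLen64]byte
	n := binary.PutVarint(lenBuf[:], int64(len(s)))
	b.Write(lenBuf[:n])
	b.WriteString(s)
}

// decodeString reads the varint length and then exactly that many
// bytes, so the stream needs no separate terminator.
func decodeString(r *bytes.Reader) (string, error) {
	length, err := binary.ReadVarint(r)
	if err != nil {
		return "", err
	}
	buf := make([]byte, length)
	if _, err := io.ReadFull(r, buf); err != nil {
		return "", err
	}
	return string(buf), nil
}

func main() {
	var b bytes.Buffer
	// Two strings back to back: the length prefixes tell the reader
	// where each one stops, as with label name/value pairs in Metric.
	encodeString(&b, "job")
	encodeString(&b, "api-server")
	r := bytes.NewReader(b.Bytes())
	for i := 0; i < 2; i++ {
		s, err := decodeString(r)
		if err != nil {
			panic(err)
		}
		fmt.Println(s)
	}
}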

View file

@ -0,0 +1,165 @@
// Copyright 2014 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package codable
import (
"bytes"
"encoding"
"reflect"
"testing"
)
func newFingerprint(fp int64) *Fingerprint {
cfp := Fingerprint(fp)
return &cfp
}
func newLabelName(ln string) *LabelName {
cln := LabelName(ln)
return &cln
}
func TestUint64(t *testing.T) {
var b bytes.Buffer
const n = 422010471112345
if err := EncodeUint64(&b, n); err != nil {
t.Fatal(err)
}
got, err := DecodeUint64(&b)
if err != nil {
t.Fatal(err)
}
if got != n {
t.Errorf("want %d, got %d", n, got)
}
}
var scenarios = []struct {
in encoding.BinaryMarshaler
out encoding.BinaryUnmarshaler
equal func(in, out interface{}) bool
}{
{
in: &Metric{
"label_1": "value_2",
"label_2": "value_2",
"label_3": "value_3",
},
out: &Metric{},
}, {
in: newFingerprint(12345),
out: newFingerprint(0),
}, {
in: &Fingerprints{1, 2, 56, 1234},
out: &Fingerprints{},
}, {
in: &Fingerprints{1, 2, 56, 1234},
out: &FingerprintSet{},
equal: func(in, out interface{}) bool {
inSet := FingerprintSet{}
for _, fp := range *(in.(*Fingerprints)) {
inSet[fp] = struct{}{}
}
return reflect.DeepEqual(inSet, *(out.(*FingerprintSet)))
},
}, {
in: &FingerprintSet{
1: struct{}{},
2: struct{}{},
56: struct{}{},
1234: struct{}{},
},
out: &FingerprintSet{},
}, {
in: &FingerprintSet{
1: struct{}{},
2: struct{}{},
56: struct{}{},
1234: struct{}{},
},
out: &Fingerprints{},
equal: func(in, out interface{}) bool {
outSet := FingerprintSet{}
for _, fp := range *(out.(*Fingerprints)) {
outSet[fp] = struct{}{}
}
return reflect.DeepEqual(outSet, *(in.(*FingerprintSet)))
},
}, {
in: &LabelPair{
Name: "label_name",
Value: "label_value",
},
out: &LabelPair{},
}, {
in: newLabelName("label_name"),
out: newLabelName(""),
}, {
in: &LabelValues{"value_1", "value_2", "value_3"},
out: &LabelValues{},
}, {
in: &LabelValues{"value_1", "value_2", "value_3"},
out: &LabelValueSet{},
equal: func(in, out interface{}) bool {
inSet := LabelValueSet{}
for _, lv := range *(in.(*LabelValues)) {
inSet[lv] = struct{}{}
}
return reflect.DeepEqual(inSet, *(out.(*LabelValueSet)))
},
}, {
in: &LabelValueSet{
"value_1": struct{}{},
"value_2": struct{}{},
"value_3": struct{}{},
},
out: &LabelValueSet{},
}, {
in: &LabelValueSet{
"value_1": struct{}{},
"value_2": struct{}{},
"value_3": struct{}{},
},
out: &LabelValues{},
equal: func(in, out interface{}) bool {
outSet := LabelValueSet{}
for _, lv := range *(out.(*LabelValues)) {
outSet[lv] = struct{}{}
}
return reflect.DeepEqual(outSet, *(in.(*LabelValueSet)))
},
}, {
in: &TimeRange{42, 2001},
out: &TimeRange{},
},
}
func TestCodec(t *testing.T) {
for i, s := range scenarios {
encoded, err := s.in.MarshalBinary()
if err != nil {
t.Fatal(err)
}
if err := s.out.UnmarshalBinary(encoded); err != nil {
t.Fatal(err)
}
equal := s.equal
if equal == nil {
equal = reflect.DeepEqual
}
if !equal(s.in, s.out) {
t.Errorf("%d. Got: %v; want %v; encoded bytes are: %v", i, s.out, s.in, encoded)
}
}
}

storage/local/delta.go

@ -0,0 +1,423 @@
// Copyright 2014 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package local
import (
"encoding/binary"
"fmt"
"io"
"math"
"sort"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
)
type deltaBytes byte
const (
d0 deltaBytes = 0
d1 deltaBytes = 1
d2 deltaBytes = 2
d4 deltaBytes = 4
d8 deltaBytes = 8
)
// The 21-byte header of a delta-encoded chunk looks like:
//
// - time delta bytes: 1 byte
// - value delta bytes: 1 byte
// - is integer: 1 byte
// - base time: 8 bytes
// - base value: 8 bytes
// - used buf bytes: 2 bytes
const (
deltaHeaderBytes = 21
deltaHeaderTimeBytesOffset = 0
deltaHeaderValueBytesOffset = 1
deltaHeaderIsIntOffset = 2
deltaHeaderBaseTimeOffset = 3
deltaHeaderBaseValueOffset = 11
deltaHeaderBufLenOffset = 19
)
// A deltaEncodedChunk adaptively stores sample timestamps and values with a
// delta encoding of various types (int, float) and bit widths. However, once 8
// bytes would be needed to encode a delta value, it falls back to storing
// absolute values (timestamps directly as int64, sample values as float64).
// It implements the chunk interface.
type deltaEncodedChunk struct {
buf []byte
}
// newDeltaEncodedChunk returns a newly allocated deltaEncodedChunk.
func newDeltaEncodedChunk(tb, vb deltaBytes, isInt bool) *deltaEncodedChunk {
buf := make([]byte, deltaHeaderIsIntOffset+1, 1024)
buf[deltaHeaderTimeBytesOffset] = byte(tb)
buf[deltaHeaderValueBytesOffset] = byte(vb)
if vb < d8 && isInt { // Only use int for fewer than 8 value delta bytes.
buf[deltaHeaderIsIntOffset] = 1
} else {
buf[deltaHeaderIsIntOffset] = 0
}
return &deltaEncodedChunk{
buf: buf,
}
}
func (c *deltaEncodedChunk) newFollowupChunk() chunk {
return newDeltaEncodedChunk(d1, d0, true)
}
// clone implements chunk.
func (c *deltaEncodedChunk) clone() chunk {
buf := make([]byte, len(c.buf), 1024)
copy(buf, c.buf)
return &deltaEncodedChunk{
buf: buf,
}
}
func neededDeltaBytes(deltaT clientmodel.Timestamp, deltaV clientmodel.SampleValue, isInt bool) (dtb, dvb deltaBytes) {
dtb = d1
if deltaT > math.MaxUint8 {
dtb = d2
}
if deltaT > math.MaxUint16 {
dtb = d4
}
if deltaT > math.MaxUint32 {
dtb = d8
}
if isInt {
dvb = d0
if deltaV != 0 {
dvb = d1
}
if deltaV < math.MinInt8 || deltaV > math.MaxInt8 {
dvb = d2
}
if deltaV < math.MinInt16 || deltaV > math.MaxInt16 {
dvb = d4
}
if deltaV < math.MinInt32 || deltaV > math.MaxInt32 {
dvb = d8
}
} else {
dvb = d4
if clientmodel.SampleValue(float32(deltaV)) != deltaV {
dvb = d8
}
}
return dtb, dvb
}
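// For intuition, a few illustrative inputs and the widths the thresholds
// above would pick (a sketch added for this write-up, not part of the code):
//
// neededDeltaBytes(200, 0, true)       -> d1, d0 (constant value, stored as base)
// neededDeltaBytes(70000, 100, true)   -> d4, d1 (70000 exceeds MaxUint16)
// neededDeltaBytes(200, 3.5, false)    -> d1, d4 (3.5 survives a float32 round trip)
// neededDeltaBytes(200, 1e-300, false) -> d1, d8 (float32 would round 1e-300 to 0)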
func max(a, b deltaBytes) deltaBytes {
if a > b {
return a
}
return b
}
func (c *deltaEncodedChunk) timeBytes() deltaBytes {
return deltaBytes(c.buf[deltaHeaderTimeBytesOffset])
}
func (c *deltaEncodedChunk) valueBytes() deltaBytes {
return deltaBytes(c.buf[deltaHeaderValueBytesOffset])
}
func (c *deltaEncodedChunk) isInt() bool {
return c.buf[deltaHeaderIsIntOffset] == 1
}
func (c *deltaEncodedChunk) baseTime() clientmodel.Timestamp {
return clientmodel.Timestamp(binary.LittleEndian.Uint64(c.buf[deltaHeaderBaseTimeOffset:]))
}
func (c *deltaEncodedChunk) baseValue() clientmodel.SampleValue {
return clientmodel.SampleValue(math.Float64frombits(binary.LittleEndian.Uint64(c.buf[deltaHeaderBaseValueOffset:])))
}
// add implements chunk.
func (c *deltaEncodedChunk) add(s *metric.SamplePair) []chunk {
if len(c.buf) < deltaHeaderBytes {
c.buf = c.buf[:deltaHeaderBytes]
binary.LittleEndian.PutUint64(c.buf[deltaHeaderBaseTimeOffset:], uint64(s.Timestamp))
binary.LittleEndian.PutUint64(c.buf[deltaHeaderBaseValueOffset:], math.Float64bits(float64(s.Value)))
}
remainingBytes := cap(c.buf) - len(c.buf)
sampleSize := c.sampleSize()
// Do we generally have space for another sample in this chunk? If not,
// overflow into a new one.
if remainingBytes < sampleSize {
overflowChunks := c.newFollowupChunk().add(s)
return []chunk{c, overflowChunks[0]}
}
dt := s.Timestamp - c.baseTime()
dv := s.Value - c.baseValue()
tb := c.timeBytes()
vb := c.valueBytes()
// If the new sample is incompatible with the current encoding, reencode the
// existing chunk data into new chunk(s).
//
// int->float.
// Note: Using math.Modf is slower than the conversion approach below.
if c.isInt() && clientmodel.SampleValue(int64(dv)) != dv {
return transcodeAndAdd(newDeltaEncodedChunk(tb, d4, false), c, s)
}
// float32->float64.
if !c.isInt() && vb == d4 && clientmodel.SampleValue(float32(dv)) != dv {
return transcodeAndAdd(newDeltaEncodedChunk(tb, d8, false), c, s)
}
if tb < d8 || vb < d8 {
// Maybe more bytes per sample.
if ntb, nvb := neededDeltaBytes(dt, dv, c.isInt()); ntb > tb || nvb > vb {
ntb = max(ntb, tb)
nvb = max(nvb, vb)
return transcodeAndAdd(newDeltaEncodedChunk(ntb, nvb, c.isInt()), c, s)
}
}
offset := len(c.buf)
c.buf = c.buf[:offset+sampleSize]
switch tb {
case d1:
c.buf[offset] = byte(dt)
case d2:
binary.LittleEndian.PutUint16(c.buf[offset:], uint16(dt))
case d4:
binary.LittleEndian.PutUint32(c.buf[offset:], uint32(dt))
case d8:
// Store the absolute value (no delta) in case of d8.
binary.LittleEndian.PutUint64(c.buf[offset:], uint64(s.Timestamp))
default:
panic("invalid number of bytes for time delta")
}
offset += int(tb)
if c.isInt() {
switch vb {
case d0:
// No-op. Constant value is stored as base value.
case d1:
c.buf[offset] = byte(dv)
case d2:
binary.LittleEndian.PutUint16(c.buf[offset:], uint16(dv))
case d4:
binary.LittleEndian.PutUint32(c.buf[offset:], uint32(dv))
// d8 must not happen. Those samples are encoded as float64.
default:
panic("invalid number of bytes for integer delta")
}
} else {
switch vb {
case d4:
binary.LittleEndian.PutUint32(c.buf[offset:], math.Float32bits(float32(dv)))
case d8:
// Store the absolute value (no delta) in case of d8.
binary.LittleEndian.PutUint64(c.buf[offset:], math.Float64bits(float64(s.Value)))
default:
panic("invalid number of bytes for floating point delta")
}
}
return []chunk{c}
}
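// A minimal usage sketch (hypothetical helper, not part of this change):
// callers must replace their head chunk with whatever add returns, since add
// may transcode the chunk or overflow into a follow-up chunk.
func appendSample(chunks []chunk, s *metric.SamplePair) []chunk {
if len(chunks) == 0 {
chunks = append(chunks, newDeltaEncodedChunk(d1, d0, true))
}
head := chunks[len(chunks)-1]
// add returns one or more chunks that together replace the head chunk.
return append(chunks[:len(chunks)-1], head.add(s)...)
}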
func (c *deltaEncodedChunk) sampleSize() int {
return int(c.timeBytes() + c.valueBytes())
}
func (c *deltaEncodedChunk) len() int {
if len(c.buf) < deltaHeaderBytes {
return 0
}
return (len(c.buf) - deltaHeaderBytes) / c.sampleSize()
}
// values implements chunk.
func (c *deltaEncodedChunk) values() <-chan *metric.SamplePair {
n := c.len()
valuesChan := make(chan *metric.SamplePair)
go func() {
for i := 0; i < n; i++ {
valuesChan <- c.valueAtIndex(i)
}
close(valuesChan)
}()
return valuesChan
}
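// A consumption sketch (hypothetical helper, not part of this change): the
// channel returned by values must be drained to completion, or the sending
// goroutine above leaks.
func dumpChunk(c chunk) {
for sp := range c.values() {
fmt.Println(sp.Timestamp, sp.Value)
}
}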
func (c *deltaEncodedChunk) valueAtIndex(idx int) *metric.SamplePair {
offset := deltaHeaderBytes + idx*c.sampleSize()
var ts clientmodel.Timestamp
switch c.timeBytes() {
case d1:
ts = c.baseTime() + clientmodel.Timestamp(uint8(c.buf[offset]))
case d2:
ts = c.baseTime() + clientmodel.Timestamp(binary.LittleEndian.Uint16(c.buf[offset:]))
case d4:
ts = c.baseTime() + clientmodel.Timestamp(binary.LittleEndian.Uint32(c.buf[offset:]))
case d8:
// Take absolute value for d8.
ts = clientmodel.Timestamp(binary.LittleEndian.Uint64(c.buf[offset:]))
default:
panic("Invalid number of bytes for time delta")
}
offset += int(c.timeBytes())
var v clientmodel.SampleValue
if c.isInt() {
switch c.valueBytes() {
case d0:
v = c.baseValue()
case d1:
v = c.baseValue() + clientmodel.SampleValue(int8(c.buf[offset]))
case d2:
v = c.baseValue() + clientmodel.SampleValue(int16(binary.LittleEndian.Uint16(c.buf[offset:])))
case d4:
v = c.baseValue() + clientmodel.SampleValue(int32(binary.LittleEndian.Uint32(c.buf[offset:])))
// No d8 for ints.
default:
panic("Invalid number of bytes for integer delta")
}
} else {
switch c.valueBytes() {
case d4:
v = c.baseValue() + clientmodel.SampleValue(math.Float32frombits(binary.LittleEndian.Uint32(c.buf[offset:])))
case d8:
// Take absolute value for d8.
v = clientmodel.SampleValue(math.Float64frombits(binary.LittleEndian.Uint64(c.buf[offset:])))
default:
panic("Invalid number of bytes for floating point delta")
}
}
return &metric.SamplePair{
Timestamp: ts,
Value: v,
}
}
// firstTime implements chunk.
func (c *deltaEncodedChunk) firstTime() clientmodel.Timestamp {
return c.valueAtIndex(0).Timestamp
}
// lastTime implements chunk.
func (c *deltaEncodedChunk) lastTime() clientmodel.Timestamp {
return c.valueAtIndex(c.len() - 1).Timestamp
}
// marshal implements chunk.
func (c *deltaEncodedChunk) marshal(w io.Writer) error {
if len(c.buf) > math.MaxUint16 {
panic("chunk buffer length would overflow a 16 bit uint.")
}
binary.LittleEndian.PutUint16(c.buf[deltaHeaderBufLenOffset:], uint16(len(c.buf)))
n, err := w.Write(c.buf[:cap(c.buf)])
if err != nil {
return err
}
if n != cap(c.buf) {
return fmt.Errorf("wanted to write %d bytes, wrote %d", len(c.buf), n)
}
return nil
}
// unmarshal implements chunk.
func (c *deltaEncodedChunk) unmarshal(r io.Reader) error {
c.buf = c.buf[:cap(c.buf)]
readBytes := 0
for readBytes < len(c.buf) {
n, err := r.Read(c.buf[readBytes:])
if err != nil {
return err
}
readBytes += n
}
c.buf = c.buf[:binary.LittleEndian.Uint16(c.buf[deltaHeaderBufLenOffset:])]
return nil
}
// deltaEncodedChunkIterator implements chunkIterator.
type deltaEncodedChunkIterator struct {
chunk *deltaEncodedChunk
// TODO: add more fields here to keep track of last position.
}
// newIterator implements chunk.
func (c *deltaEncodedChunk) newIterator() chunkIterator {
return &deltaEncodedChunkIterator{
chunk: c,
}
}
// getValueAtTime implements chunkIterator.
func (it *deltaEncodedChunkIterator) getValueAtTime(t clientmodel.Timestamp) metric.Values {
i := sort.Search(it.chunk.len(), func(i int) bool {
return !it.chunk.valueAtIndex(i).Timestamp.Before(t)
})
switch i {
case 0:
return metric.Values{*it.chunk.valueAtIndex(0)}
case it.chunk.len():
return metric.Values{*it.chunk.valueAtIndex(it.chunk.len() - 1)}
default:
v := it.chunk.valueAtIndex(i)
if v.Timestamp.Equal(t) {
return metric.Values{*v}
}
return metric.Values{*it.chunk.valueAtIndex(i - 1), *v}
}
}
// getRangeValues implements chunkIterator.
func (it *deltaEncodedChunkIterator) getRangeValues(in metric.Interval) metric.Values {
oldest := sort.Search(it.chunk.len(), func(i int) bool {
return !it.chunk.valueAtIndex(i).Timestamp.Before(in.OldestInclusive)
})
newest := sort.Search(it.chunk.len(), func(i int) bool {
return it.chunk.valueAtIndex(i).Timestamp.After(in.NewestInclusive)
})
if oldest == it.chunk.len() {
return nil
}
result := make(metric.Values, 0, newest-oldest)
for i := oldest; i < newest; i++ {
result = append(result, *it.chunk.valueAtIndex(i))
}
return result
}
// contains implements chunkIterator.
func (it *deltaEncodedChunkIterator) contains(t clientmodel.Timestamp) bool {
return !t.Before(it.chunk.firstTime()) && !t.After(it.chunk.lastTime())
}


@ -0,0 +1,289 @@
// Copyright 2014 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package index provides a number of indexes backed by persistent key-value
// stores. The only supported implementation of a key-value store is currently
// goleveldb, but other implementations can easily be added.
package index
import (
"flag"
"os"
"path"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/local/codable"
"github.com/prometheus/prometheus/storage/metric"
)
const (
fingerprintToMetricDir = "archived_fingerprint_to_metric"
fingerprintTimeRangeDir = "archived_fingerprint_to_timerange"
labelNameToLabelValuesDir = "labelname_to_labelvalues"
labelPairToFingerprintsDir = "labelpair_to_fingerprints"
)
var (
// TODO: Tweak default values.
fingerprintToMetricCacheSize = flag.Int("storage.local.index-cache-size.fingerprint-to-metric", 10*1024*1024, "The size in bytes for the fingerprint to metric index cache.")
fingerprintTimeRangeCacheSize = flag.Int("storage.local.index-cache-size.fingerprint-to-timerange", 5*1024*1024, "The size in bytes for the metric time range index cache.")
labelNameToLabelValuesCacheSize = flag.Int("storage.local.index-cache-size.label-name-to-label-values", 10*1024*1024, "The size in bytes for the label name to label values index cache.")
labelPairToFingerprintsCacheSize = flag.Int("storage.local.index-cache-size.label-pair-to-fingerprints", 20*1024*1024, "The size in bytes for the label pair to fingerprints index cache.")
)
// FingerprintMetricMapping is an in-memory map of fingerprints to metrics.
type FingerprintMetricMapping map[clientmodel.Fingerprint]clientmodel.Metric
// FingerprintMetricIndex models a database mapping fingerprints to metrics.
type FingerprintMetricIndex struct {
KeyValueStore
}
// IndexBatch indexes a batch of mappings from fingerprints to metrics.
//
// This method is goroutine-safe, but note that no specific order of execution
// can be guaranteed (especially critical if IndexBatch and UnindexBatch are
// called concurrently for the same fingerprint).
func (i *FingerprintMetricIndex) IndexBatch(mapping FingerprintMetricMapping) error {
b := i.NewBatch()
for fp, m := range mapping {
b.Put(codable.Fingerprint(fp), codable.Metric(m))
}
return i.Commit(b)
}
// UnindexBatch unindexes a batch of mappings from fingerprints to metrics.
//
// This method is goroutine-safe, but note that no specific order of execution
// can be guaranteed (especially critical if IndexBatch and UnindexBatch are
// called concurrently for the same fingerprint).
func (i *FingerprintMetricIndex) UnindexBatch(mapping FingerprintMetricMapping) error {
b := i.NewBatch()
for fp := range mapping {
b.Delete(codable.Fingerprint(fp))
}
return i.Commit(b)
}
// Lookup looks up a metric by fingerprint. Looking up a non-existing
// fingerprint is not an error. In that case, (nil, false, nil) is returned.
//
// This method is goroutine-safe.
func (i *FingerprintMetricIndex) Lookup(fp clientmodel.Fingerprint) (metric clientmodel.Metric, ok bool, err error) {
ok, err = i.Get(codable.Fingerprint(fp), (*codable.Metric)(&metric))
return
}
// NewFingerprintMetricIndex returns a LevelDB-backed FingerprintMetricIndex
// ready to use.
func NewFingerprintMetricIndex(basePath string) (*FingerprintMetricIndex, error) {
fingerprintToMetricDB, err := NewLevelDB(LevelDBOptions{
Path: path.Join(basePath, fingerprintToMetricDir),
CacheSizeBytes: *fingerprintToMetricCacheSize,
})
if err != nil {
return nil, err
}
return &FingerprintMetricIndex{
KeyValueStore: fingerprintToMetricDB,
}, nil
}
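// A usage sketch for the index above (illustrative only, not part of this
// change; the base path and function name are hypothetical).
func exampleFingerprintMetricIndexUse(m clientmodel.Metric) error {
idx, err := NewFingerprintMetricIndex("/tmp/prometheus-index")
if err != nil {
return err
}
defer idx.Close()
fp := m.Fingerprint()
if err := idx.IndexBatch(FingerprintMetricMapping{fp: m}); err != nil {
return err
}
// Looking up an unknown fingerprint returns (nil, false, nil).
if _, ok, err := idx.Lookup(fp); err != nil || !ok {
return err
}
return idx.UnindexBatch(FingerprintMetricMapping{fp: m})
}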
// LabelNameLabelValuesMapping is an in-memory map of label names to
// label values.
type LabelNameLabelValuesMapping map[clientmodel.LabelName]codable.LabelValueSet
// LabelNameLabelValuesIndex is a KeyValueStore that maps existing label names
// to all label values stored for that label name.
type LabelNameLabelValuesIndex struct {
KeyValueStore
}
// IndexBatch adds a batch of label name to label values mappings to the
// index. A mapping of a label name to an empty set of label values results in
// a deletion of that mapping from the index.
//
// While this method is fundamentally goroutine-safe, note that the order of
// execution for multiple batches executed concurrently is undefined.
func (i *LabelNameLabelValuesIndex) IndexBatch(b LabelNameLabelValuesMapping) error {
batch := i.NewBatch()
for name, values := range b {
if len(values) == 0 {
if err := batch.Delete(codable.LabelName(name)); err != nil {
return err
}
} else {
if err := batch.Put(codable.LabelName(name), values); err != nil {
return err
}
}
}
return i.Commit(batch)
}
// Lookup looks up all label values for a given label name and returns them as
// clientmodel.LabelValues (which is a slice). Looking up a non-existing label
// name is not an error. In that case, (nil, false, nil) is returned.
//
// This method is goroutine-safe.
func (i *LabelNameLabelValuesIndex) Lookup(l clientmodel.LabelName) (values clientmodel.LabelValues, ok bool, err error) {
ok, err = i.Get(codable.LabelName(l), (*codable.LabelValues)(&values))
return
}
// LookupSet looks up all label values for a given label name and returns them
// as a set. Looking up a non-existing label name is not an error. In that case,
// (nil, false, nil) is returned.
//
// This method is goroutine-safe.
func (i *LabelNameLabelValuesIndex) LookupSet(l clientmodel.LabelName) (values map[clientmodel.LabelValue]struct{}, ok bool, err error) {
ok, err = i.Get(codable.LabelName(l), (*codable.LabelValueSet)(&values))
if values == nil {
values = map[clientmodel.LabelValue]struct{}{}
}
return
}
// NewLabelNameLabelValuesIndex returns a LevelDB-backed
// LabelNameLabelValuesIndex ready to use.
func NewLabelNameLabelValuesIndex(basePath string) (*LabelNameLabelValuesIndex, error) {
labelNameToLabelValuesDB, err := NewLevelDB(LevelDBOptions{
Path: path.Join(basePath, labelNameToLabelValuesDir),
CacheSizeBytes: *labelNameToLabelValuesCacheSize,
})
if err != nil {
return nil, err
}
return &LabelNameLabelValuesIndex{
KeyValueStore: labelNameToLabelValuesDB,
}, nil
}
// DeleteLabelNameLabelValuesIndex deletes the LevelDB-backed
// LabelNameLabelValuesIndex. Only use this on an index that is not yet open.
func DeleteLabelNameLabelValuesIndex(basePath string) error {
return os.RemoveAll(path.Join(basePath, labelNameToLabelValuesDir))
}
// LabelPairFingerprintsMapping is an in-memory map of label pairs to
// fingerprints.
type LabelPairFingerprintsMapping map[metric.LabelPair]codable.FingerprintSet
// LabelPairFingerprintIndex is a KeyValueStore that maps existing label pairs
// to the fingerprints of all metrics containing those label pairs.
type LabelPairFingerprintIndex struct {
KeyValueStore
}
// IndexBatch indexes a batch of mappings from label pairs to fingerprints. A
// mapping to an empty set of fingerprints results in deletion of that mapping
// from the index.
//
// While this method is fundamentally goroutine-safe, note that the order of
// execution for multiple batches executed concurrently is undefined.
func (i *LabelPairFingerprintIndex) IndexBatch(m LabelPairFingerprintsMapping) error {
batch := i.NewBatch()
for pair, fps := range m {
if len(fps) == 0 {
if err := batch.Delete(codable.LabelPair(pair)); err != nil {
return err
}
} else {
if err := batch.Put(codable.LabelPair(pair), fps); err != nil {
return err
}
}
}
return i.Commit(batch)
}
// Lookup looks up all fingerprints for a given label pair. Looking up a
// non-existing label pair is not an error. In that case, (nil, false, nil) is
// returned.
//
// This method is goroutine-safe.
func (i *LabelPairFingerprintIndex) Lookup(p metric.LabelPair) (fps clientmodel.Fingerprints, ok bool, err error) {
ok, err = i.Get((codable.LabelPair)(p), (*codable.Fingerprints)(&fps))
return
}
// LookupSet looks up all fingerprints for a given label pair. Looking up a
// non-existing label pair is not an error. In that case, (nil, false, nil) is
// returned.
//
// This method is goroutine-safe.
func (i *LabelPairFingerprintIndex) LookupSet(p metric.LabelPair) (fps map[clientmodel.Fingerprint]struct{}, ok bool, err error) {
ok, err = i.Get((codable.LabelPair)(p), (*codable.FingerprintSet)(&fps))
if fps == nil {
fps = map[clientmodel.Fingerprint]struct{}{}
}
return
}
// NewLabelPairFingerprintIndex returns a LevelDB-backed
// LabelPairFingerprintIndex ready to use.
func NewLabelPairFingerprintIndex(basePath string) (*LabelPairFingerprintIndex, error) {
labelPairToFingerprintsDB, err := NewLevelDB(LevelDBOptions{
Path: path.Join(basePath, labelPairToFingerprintsDir),
CacheSizeBytes: *labelPairToFingerprintsCacheSize,
})
if err != nil {
return nil, err
}
return &LabelPairFingerprintIndex{
KeyValueStore: labelPairToFingerprintsDB,
}, nil
}
// DeleteLabelPairFingerprintIndex deletes the LevelDB-backed
// LabelPairFingerprintIndex. Only use this on an index that is not yet open.
func DeleteLabelPairFingerprintIndex(basePath string) error {
return os.RemoveAll(path.Join(basePath, labelPairToFingerprintsDir))
}
// FingerprintTimeRangeIndex models a database tracking the time ranges
// of metrics by their fingerprints.
type FingerprintTimeRangeIndex struct {
KeyValueStore
}
// Lookup returns the time range for the given fingerprint. Looking up a
// non-existing fingerprint is not an error. In that case, (0, 0, false, nil) is
// returned.
//
// This method is goroutine-safe.
func (i *FingerprintTimeRangeIndex) Lookup(fp clientmodel.Fingerprint) (firstTime, lastTime clientmodel.Timestamp, ok bool, err error) {
var tr codable.TimeRange
ok, err = i.Get(codable.Fingerprint(fp), &tr)
return tr.First, tr.Last, ok, err
}
// NewFingerprintTimeRangeIndex returns a LevelDB-backed
// FingerprintTimeRangeIndex ready to use.
func NewFingerprintTimeRangeIndex(basePath string) (*FingerprintTimeRangeIndex, error) {
fingerprintTimeRangeDB, err := NewLevelDB(LevelDBOptions{
Path: path.Join(basePath, fingerprintTimeRangeDir),
CacheSizeBytes: *fingerprintTimeRangeCacheSize,
})
if err != nil {
return nil, err
}
return &FingerprintTimeRangeIndex{
KeyValueStore: fingerprintTimeRangeDB,
}, nil
}


@ -0,0 +1,61 @@
// Copyright 2014 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package index
import "encoding"
// KeyValueStore persists key/value pairs. Implementations must be fundamentally
// goroutine-safe. However, it is the caller's responsibility that keys and
// values can be safely marshaled and unmarshaled (via the MarshalBinary and
// UnmarshalBinary methods of the keys and values). For example, if you call the
// Put method of a KeyValueStore implementation, but the key or the value are
// modified concurrently while being marshaled into its binary representation,
// you obviously have a problem. Methods of KeyValueStore return only after
// (un)marshaling is complete.
type KeyValueStore interface {
Put(key, value encoding.BinaryMarshaler) error
// Get unmarshals the result into value. It returns false if no entry
// could be found for key. If value is nil, Get behaves like Has.
Get(key encoding.BinaryMarshaler, value encoding.BinaryUnmarshaler) (bool, error)
Has(key encoding.BinaryMarshaler) (bool, error)
// Delete returns an error if key does not exist.
Delete(key encoding.BinaryMarshaler) error
NewBatch() Batch
Commit(b Batch) error
// ForEach iterates through the complete KeyValueStore and calls the
// supplied function for each mapping.
ForEach(func(kv KeyValueAccessor) error) error
Close() error
}
// KeyValueAccessor allows access to the key and value of an entry in a
// KeyValueStore.
type KeyValueAccessor interface {
Key(encoding.BinaryUnmarshaler) error
Value(encoding.BinaryUnmarshaler) error
}
// Batch allows KeyValueStore mutations to be pooled and committed together. An
// implementation does not have to be goroutine-safe. Never modify a Batch
// concurrently or commit the same batch multiple times concurrently. Marshaling
// of keys and values is guaranteed to be complete when the Put or Delete methods
// have returned.
type Batch interface {
Put(key, value encoding.BinaryMarshaler) error
Delete(key encoding.BinaryMarshaler) error
Reset()
}
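// A sketch of the intended calling pattern (illustrative only, not part of
// this change): it assumes the codable package from this change is imported
// and kv is any KeyValueStore implementation.
func exampleKeyValueStoreUse(kv KeyValueStore) error {
b := kv.NewBatch()
if err := b.Put(codable.LabelName("job"), codable.LabelValues{"api", "web"}); err != nil {
return err
}
if err := kv.Commit(b); err != nil {
return err
}
var values codable.LabelValues
// Get reports ok == false if the key is absent.
if ok, err := kv.Get(codable.LabelName("job"), &values); err != nil || !ok {
return err
}
return nil
}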


@ -0,0 +1,204 @@
// Copyright 2014 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package index
import (
"encoding"
"github.com/syndtr/goleveldb/leveldb"
leveldb_cache "github.com/syndtr/goleveldb/leveldb/cache"
leveldb_filter "github.com/syndtr/goleveldb/leveldb/filter"
leveldb_iterator "github.com/syndtr/goleveldb/leveldb/iterator"
leveldb_opt "github.com/syndtr/goleveldb/leveldb/opt"
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
)
var (
keyspace = &leveldb_util.Range{
Start: nil,
Limit: nil,
}
iteratorOpts = &leveldb_opt.ReadOptions{
DontFillCache: true,
}
)
// LevelDB is a LevelDB-backed sorted KeyValueStore.
type LevelDB struct {
storage *leveldb.DB
readOpts *leveldb_opt.ReadOptions
writeOpts *leveldb_opt.WriteOptions
}
// LevelDBOptions provides options for a LevelDB.
type LevelDBOptions struct {
Path string // Base path to store files.
CacheSizeBytes int
}
// NewLevelDB returns a newly allocated LevelDB-backed KeyValueStore ready to
// use.
func NewLevelDB(o LevelDBOptions) (KeyValueStore, error) {
options := &leveldb_opt.Options{
Compression: leveldb_opt.SnappyCompression,
BlockCache: leveldb_cache.NewLRUCache(o.CacheSizeBytes),
Filter: leveldb_filter.NewBloomFilter(10),
}
storage, err := leveldb.OpenFile(o.Path, options)
if err != nil {
return nil, err
}
return &LevelDB{
storage: storage,
readOpts: &leveldb_opt.ReadOptions{},
writeOpts: &leveldb_opt.WriteOptions{},
}, nil
}
// NewBatch implements KeyValueStore.
func (l *LevelDB) NewBatch() Batch {
return &LevelDBBatch{
batch: &leveldb.Batch{},
}
}
// Close implements KeyValueStore.
func (l *LevelDB) Close() error {
return l.storage.Close()
}
// Get implements KeyValueStore.
func (l *LevelDB) Get(key encoding.BinaryMarshaler, value encoding.BinaryUnmarshaler) (bool, error) {
k, err := key.MarshalBinary()
if err != nil {
return false, err
}
raw, err := l.storage.Get(k, l.readOpts)
if err == leveldb.ErrNotFound {
return false, nil
}
if err != nil {
return false, err
}
if value == nil {
return true, nil
}
return true, value.UnmarshalBinary(raw)
}
// Has implements KeyValueStore.
func (l *LevelDB) Has(key encoding.BinaryMarshaler) (has bool, err error) {
return l.Get(key, nil)
}
// Delete implements KeyValueStore.
func (l *LevelDB) Delete(key encoding.BinaryMarshaler) error {
k, err := key.MarshalBinary()
if err != nil {
return err
}
return l.storage.Delete(k, l.writeOpts)
}
// Put implements KeyValueStore.
func (l *LevelDB) Put(key, value encoding.BinaryMarshaler) error {
k, err := key.MarshalBinary()
if err != nil {
return err
}
v, err := value.MarshalBinary()
if err != nil {
return err
}
return l.storage.Put(k, v, l.writeOpts)
}
// Commit implements KeyValueStore.
func (l *LevelDB) Commit(b Batch) error {
return l.storage.Write(b.(*LevelDBBatch).batch, l.writeOpts)
}
// ForEach implements KeyValueStore.
func (l *LevelDB) ForEach(cb func(kv KeyValueAccessor) error) error {
snap, err := l.storage.GetSnapshot()
if err != nil {
return err
}
defer snap.Release()
iter := snap.NewIterator(keyspace, iteratorOpts)
kv := &levelDBKeyValueAccessor{it: iter}
for valid := iter.First(); valid; valid = iter.Next() {
if err = iter.Error(); err != nil {
return err
}
if err := cb(kv); err != nil {
return err
}
}
return nil
}
// LevelDBBatch is a Batch implementation for LevelDB.
type LevelDBBatch struct {
batch *leveldb.Batch
}
// Put implements Batch.
func (b *LevelDBBatch) Put(key, value encoding.BinaryMarshaler) error {
k, err := key.MarshalBinary()
if err != nil {
return err
}
v, err := value.MarshalBinary()
if err != nil {
return err
}
b.batch.Put(k, v)
return nil
}
// Delete implements Batch.
func (b *LevelDBBatch) Delete(key encoding.BinaryMarshaler) error {
k, err := key.MarshalBinary()
if err != nil {
return err
}
b.batch.Delete(k)
return nil
}
// Reset implements Batch.
func (b *LevelDBBatch) Reset() {
b.batch.Reset()
}
// levelDBKeyValueAccessor implements KeyValueAccessor.
type levelDBKeyValueAccessor struct {
it leveldb_iterator.Iterator
}
func (i *levelDBKeyValueAccessor) Key(key encoding.BinaryUnmarshaler) error {
return key.UnmarshalBinary(i.it.Key())
}
func (i *levelDBKeyValueAccessor) Value(value encoding.BinaryUnmarshaler) error {
return value.UnmarshalBinary(i.it.Value())
}
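// An iteration sketch (illustrative only, not part of this change): it
// assumes fmt and the codable package are imported, and that every entry in
// the store was written with codable.LabelName keys and codable.LabelValues
// values.
func dumpLabelValues(kv KeyValueStore) error {
return kv.ForEach(func(acc KeyValueAccessor) error {
var name codable.LabelName
var values codable.LabelValues
if err := acc.Key(&name); err != nil {
return err
}
if err := acc.Value(&values); err != nil {
return err
}
fmt.Printf("%s -> %v\n", name, values)
return nil
})
}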


@ -0,0 +1,92 @@
// Copyright 2014 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package local
import "github.com/prometheus/client_golang/prometheus"
// Usually, a separate file for instrumentation is frowned upon. Metrics should
// be close to where they are used. However, the metrics below are set all over
// the place, so we go for a separate instrumentation file in this case.
var (
chunkOps = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "chunk_ops_total",
Help: "The total number of chunk operations by their type.",
},
[]string{opTypeLabel},
)
chunkDescOps = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "chunkdesc_ops_total",
Help: "The total number of chunk descriptor operations by their type.",
},
[]string{opTypeLabel},
)
)
const (
namespace = "prometheus"
subsystem = "local_storage"
opTypeLabel = "type"
// Op-types for seriesOps.
create = "create"
archive = "archive"
unarchive = "unarchive"
memoryPurge = "purge_from_memory"
archivePurge = "purge_from_archive"
memoryMaintenance = "maintenance_in_memory"
archiveMaintenance = "maintenance_in_archive"
// Op-types for chunkOps.
createAndPin = "create" // A chunkDesc creation with refCount=1.
persistAndUnpin = "persist"
pin = "pin" // Excluding the pin on creation.
unpin = "unpin" // Excluding the unpin on persisting.
clone = "clone"
transcode = "transcode"
purge = "purge"
// Op-types for chunkOps and chunkDescOps.
evict = "evict"
load = "load"
)
func init() {
prometheus.MustRegister(chunkOps)
prometheus.MustRegister(chunkDescOps)
}
var (
// Global counters, also used internally, so not implemented as
// metrics. Collected in memorySeriesStorage.Collect.
numMemChunks, numMemChunkDescs int64
// Metric descriptors for the above.
numMemChunksDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, subsystem, "memory_chunks"),
"The current number of chunks in memory, excluding cloned chunks (i.e. chunks without a descriptor).",
nil, nil,
)
numMemChunkDescsDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, subsystem, "memory_chunkdescs"),
"The current number of chunk descriptors in memory.",
nil, nil,
)
)


@ -0,0 +1,87 @@
// Copyright 2014 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package local
import (
"time"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/storage/metric"
)
// Storage ingests and manages samples, along with various indexes. All methods
// are goroutine-safe.
type Storage interface {
prometheus.Collector
// AppendSamples stores a group of new samples. Multiple samples for the same
// fingerprint need to be submitted in chronological order, from oldest to
// newest (both in the same call to AppendSamples and across multiple calls).
AppendSamples(clientmodel.Samples)
// NewPreloader returns a new Preloader which allows preloading and pinning
// series data into memory for use within a query.
NewPreloader() Preloader
// Get all of the metric fingerprints that are associated with the
// provided label matchers.
GetFingerprintsForLabelMatchers(metric.LabelMatchers) clientmodel.Fingerprints
// Get all of the label values that are associated with a given label name.
GetLabelValuesForLabelName(clientmodel.LabelName) clientmodel.LabelValues
// Get the metric associated with the provided fingerprint.
GetMetricForFingerprint(clientmodel.Fingerprint) clientmodel.Metric
// Construct an iterator for a given fingerprint.
NewIterator(clientmodel.Fingerprint) SeriesIterator
// Run the various maintenance loops in goroutines. Returns when the
// storage is ready to use. Keeps everything running in the background
// until Stop is called.
Start()
// Stop shuts down the Storage gracefully, flushes all pending
// operations, stops all maintenance loops, and frees all resources.
Stop() error
// WaitForIndexing returns once all samples in the storage are
// indexed. Indexing is needed for GetFingerprintsForLabelMatchers and
// GetLabelValuesForLabelName and may lag behind.
WaitForIndexing()
}
// SeriesIterator enables efficient access of sample values in a series. All
// methods are goroutine-safe. A SeriesIterator iterates over a snapshot of a
// series, i.e. it is safe to continue using a SeriesIterator after modifying
// the corresponding series, but the iterator will represent the state of the
// series prior to the modification.
type SeriesIterator interface {
// Gets the two values that are immediately adjacent to a given time. If
// a value exists at precisely the given time, only that single value is
// returned. If the given time is before the first value or after the
// last value, only that first or last value is returned (as a single
// value).
GetValueAtTime(clientmodel.Timestamp) metric.Values
// Gets the boundary values of an interval: the first and last value
// within a given interval.
GetBoundaryValues(metric.Interval) metric.Values
// Gets all values contained within a given interval.
GetRangeValues(metric.Interval) metric.Values
}
// A Preloader preloads series data necessary for a query into memory and pins
// it until released via Close(). Its methods are generally not
// goroutine-safe.
type Preloader interface {
PreloadRange(
fp clientmodel.Fingerprint,
from clientmodel.Timestamp, through clientmodel.Timestamp,
stalenessDelta time.Duration,
) error
// Close unpins any previously requested series data from memory.
Close()
}
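// A minimal lifecycle sketch (illustrative only, not part of this change):
// how the Storage and Preloader interfaces above are meant to be combined.
// The matchers, time bounds, and staleness delta are hypothetical.
func exampleStorageUse(s Storage, samples clientmodel.Samples, matchers metric.LabelMatchers) {
s.Start()
defer s.Stop()
s.AppendSamples(samples)
// Lookups by label may lag behind ingestion until indexing has caught up.
s.WaitForIndexing()
p := s.NewPreloader()
defer p.Close() // Unpins everything preloaded below.
for _, fp := range s.GetFingerprintsForLabelMatchers(matchers) {
if err := p.PreloadRange(fp, 0, clientmodel.Timestamp(1<<31), time.Hour); err != nil {
continue // Sketch only: skip series that cannot be preloaded.
}
it := s.NewIterator(fp)
_ = it.GetValueAtTime(clientmodel.Timestamp(0))
}
}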

storage/local/locker.go

@ -0,0 +1,43 @@
package local
import (
"sync"
clientmodel "github.com/prometheus/client_golang/model"
)
// fingerprintLocker allows locking individual fingerprints. To limit the number
// of mutexes needed for that, only a fixed number of mutexes are
// allocated. Fingerprints to be locked are assigned to those pre-allocated
// mutexes by their value. (Note that fingerprints are calculated by a hash
// function, so that an approximately equal distribution over the mutexes is
// expected, even without additional hashing of the fingerprint value.)
// Collisions are not detected. If two fingerprints get assigned to the same
// mutex, only one of them can be locked at the same time. As long as the number
// of pre-allocated mutexes is much larger than the number of goroutines
// requiring a fingerprint lock concurrently, the loss in efficiency is
// small. However, a goroutine must never lock more than one fingerprint at the
// same time. (Otherwise, a collision could make it try to acquire the same
// mutex twice, which would deadlock.)
type fingerprintLocker struct {
fpMtxs []sync.Mutex
numFpMtxs uint
}
// newFingerprintLocker returns a new fingerprintLocker ready for use.
func newFingerprintLocker(preallocatedMutexes int) *fingerprintLocker {
return &fingerprintLocker{
make([]sync.Mutex, preallocatedMutexes),
uint(preallocatedMutexes),
}
}
// Lock locks the given fingerprint.
func (l *fingerprintLocker) Lock(fp clientmodel.Fingerprint) {
l.fpMtxs[uint(fp)%l.numFpMtxs].Lock()
}
// Unlock unlocks the given fingerprint.
func (l *fingerprintLocker) Unlock(fp clientmodel.Fingerprint) {
l.fpMtxs[uint(fp)%l.numFpMtxs].Unlock()
}
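// A sketch of the locking discipline the comment above prescribes, wrapped in
// a hypothetical helper (not part of this change). Never nest calls to it.
func withFPLock(l *fingerprintLocker, fp clientmodel.Fingerprint, work func()) {
l.Lock(fp)
defer l.Unlock(fp)
work()
}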


@ -0,0 +1,45 @@
package local
import (
"sync"
"testing"
clientmodel "github.com/prometheus/client_golang/model"
)
func BenchmarkFingerprintLockerParallel(b *testing.B) {
numGoroutines := 10
numFingerprints := 10
numLockOps := b.N
locker := newFingerprintLocker(100)
wg := sync.WaitGroup{}
b.ResetTimer()
for i := 0; i < numGoroutines; i++ {
wg.Add(1)
go func(i int) {
for j := 0; j < numLockOps; j++ {
fp1 := clientmodel.Fingerprint(j % numFingerprints)
fp2 := clientmodel.Fingerprint(j%numFingerprints + numFingerprints)
locker.Lock(fp1)
locker.Lock(fp2)
locker.Unlock(fp2)
locker.Unlock(fp1)
}
wg.Done()
}(i)
}
wg.Wait()
}
func BenchmarkFingerprintLockerSerial(b *testing.B) {
numFingerprints := 10
locker := newFingerprintLocker(100)
b.ResetTimer()
for i := 0; i < b.N; i++ {
fp := clientmodel.Fingerprint(i % numFingerprints)
locker.Lock(fp)
locker.Unlock(fp)
}
}

storage/local/persistence.go

File diff suppressed because it is too large


@ -0,0 +1,623 @@
// Copyright 2014 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package local
import (
"reflect"
"testing"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/local/codable"
"github.com/prometheus/prometheus/storage/local/index"
"github.com/prometheus/prometheus/storage/metric"
"github.com/prometheus/prometheus/utility/test"
)
var (
m1 = clientmodel.Metric{"label": "value1"}
m2 = clientmodel.Metric{"label": "value2"}
m3 = clientmodel.Metric{"label": "value3"}
)
func newTestPersistence(t *testing.T) (*persistence, test.Closer) {
dir := test.NewTemporaryDirectory("test_persistence", t)
p, err := newPersistence(dir.Path(), 1024, false)
if err != nil {
dir.Close()
t.Fatal(err)
}
return p, test.NewCallbackCloser(func() {
p.close()
dir.Close()
})
}
func buildTestChunks() map[clientmodel.Fingerprint][]chunk {
fps := clientmodel.Fingerprints{
m1.Fingerprint(),
m2.Fingerprint(),
m3.Fingerprint(),
}
fpToChunks := map[clientmodel.Fingerprint][]chunk{}
for _, fp := range fps {
fpToChunks[fp] = make([]chunk, 0, 10)
for i := 0; i < 10; i++ {
fpToChunks[fp] = append(fpToChunks[fp], newDeltaEncodedChunk(d1, d1, true).add(&metric.SamplePair{
Timestamp: clientmodel.Timestamp(i),
Value: clientmodel.SampleValue(fp),
})[0])
}
}
return fpToChunks
}
func chunksEqual(c1, c2 chunk) bool {
values2 := c2.values()
for v1 := range c1.values() {
v2 := <-values2
if !v1.Equal(v2) {
return false
}
}
return true
}
func TestPersistLoadDropChunks(t *testing.T) {
p, closer := newTestPersistence(t)
defer closer.Close()
fpToChunks := buildTestChunks()
for fp, chunks := range fpToChunks {
for i, c := range chunks {
idx, err := p.persistChunk(fp, c)
if err != nil {
t.Fatal(err)
}
if i != idx {
t.Errorf("Want chunk index %d, got %d.", i, idx)
}
}
}
for fp, expectedChunks := range fpToChunks {
indexes := make([]int, 0, len(expectedChunks))
for i := range expectedChunks {
indexes = append(indexes, i)
}
actualChunks, err := p.loadChunks(fp, indexes, 0)
if err != nil {
t.Fatal(err)
}
for _, i := range indexes {
if !chunksEqual(expectedChunks[i], actualChunks[i]) {
t.Errorf("%d. Chunks not equal.", i)
}
}
// Load all chunk descs.
actualChunkDescs, err := p.loadChunkDescs(fp, 10)
if err != nil {
t.Fatal(err)
}
if len(actualChunkDescs) != 10 {
t.Errorf("Got %d chunkDescs, want %d.", len(actualChunkDescs), 10)
}
for i, cd := range actualChunkDescs {
if cd.firstTime() != clientmodel.Timestamp(i) || cd.lastTime() != clientmodel.Timestamp(i) {
t.Errorf(
"Want ts=%v, got firstTime=%v, lastTime=%v.",
i, cd.firstTime(), cd.lastTime(),
)
}
}
// Load chunk descs partially.
actualChunkDescs, err = p.loadChunkDescs(fp, 5)
if err != nil {
t.Fatal(err)
}
if len(actualChunkDescs) != 5 {
t.Errorf("Got %d chunkDescs, want %d.", len(actualChunkDescs), 5)
}
for i, cd := range actualChunkDescs {
if cd.firstTime() != clientmodel.Timestamp(i) || cd.lastTime() != clientmodel.Timestamp(i) {
t.Errorf(
"Want ts=%v, got firstTime=%v, lastTime=%v.",
i, cd.firstTime(), cd.lastTime(),
)
}
}
}
// Drop half of the chunks.
for fp, expectedChunks := range fpToChunks {
firstTime, numDropped, allDropped, err := p.dropChunks(fp, 5)
if err != nil {
t.Fatal(err)
}
if firstTime != 5 {
t.Errorf("want first time 5, got %d", firstTime)
}
if numDropped != 5 {
t.Errorf("want 5 dropped chunks, got %v", numDropped)
}
if allDropped {
t.Error("all chunks dropped")
}
indexes := make([]int, 5)
for i := range indexes {
indexes[i] = i
}
actualChunks, err := p.loadChunks(fp, indexes, 0)
if err != nil {
t.Fatal(err)
}
for _, i := range indexes {
if !chunksEqual(expectedChunks[i+5], actualChunks[i]) {
t.Errorf("%d. Chunks not equal.", i)
}
}
}
// Drop all the chunks.
for fp := range fpToChunks {
firstTime, numDropped, allDropped, err := p.dropChunks(fp, 100)
if firstTime != 0 {
t.Errorf("want first time 0, got %d", firstTime)
}
if err != nil {
t.Fatal(err)
}
if numDropped != 5 {
t.Errorf("want 5 dropped chunks, got %v", numDropped)
}
if !allDropped {
t.Error("not all chunks dropped")
}
}
}
func TestCheckpointAndLoadSeriesMapAndHeads(t *testing.T) {
p, closer := newTestPersistence(t)
defer closer.Close()
fpLocker := newFingerprintLocker(10)
sm := newSeriesMap()
s1 := newMemorySeries(m1, true, 0)
s2 := newMemorySeries(m2, false, 0)
s3 := newMemorySeries(m3, false, 0)
s1.add(m1.Fingerprint(), &metric.SamplePair{Timestamp: 1, Value: 3.14})
s3.add(m3.Fingerprint(), &metric.SamplePair{Timestamp: 2, Value: 2.7})
s3.headChunkPersisted = true
sm.put(m1.Fingerprint(), s1)
sm.put(m2.Fingerprint(), s2)
sm.put(m3.Fingerprint(), s3)
if err := p.checkpointSeriesMapAndHeads(sm, fpLocker); err != nil {
t.Fatal(err)
}
loadedSM, err := p.loadSeriesMapAndHeads()
if err != nil {
t.Fatal(err)
}
if loadedSM.length() != 2 {
t.Errorf("want 2 series in map, got %d", loadedSM.length())
}
if loadedS1, ok := loadedSM.get(m1.Fingerprint()); ok {
if !reflect.DeepEqual(loadedS1.metric, m1) {
t.Errorf("want metric %v, got %v", m1, loadedS1.metric)
}
if !reflect.DeepEqual(loadedS1.head().chunk, s1.head().chunk) {
t.Error("head chunks differ")
}
if loadedS1.chunkDescsOffset != 0 {
t.Errorf("want chunkDescsOffset 0, got %d", loadedS1.chunkDescsOffset)
}
if loadedS1.headChunkPersisted {
t.Error("headChunkPersisted is true")
}
} else {
t.Errorf("couldn't find %v in loaded map", m1)
}
if loadedS3, ok := loadedSM.get(m3.Fingerprint()); ok {
if !reflect.DeepEqual(loadedS3.metric, m3) {
t.Errorf("want metric %v, got %v", m3, loadedS3.metric)
}
if loadedS3.head().chunk != nil {
t.Error("head chunk not evicted")
}
if loadedS3.chunkDescsOffset != -1 {
t.Errorf("want chunkDescsOffset -1, got %d", loadedS3.chunkDescsOffset)
}
if !loadedS3.headChunkPersisted {
t.Error("headChunkPersisted is false")
}
} else {
t.Errorf("couldn't find %v in loaded map", m1)
}
}
func TestGetFingerprintsModifiedBefore(t *testing.T) {
p, closer := newTestPersistence(t)
defer closer.Close()
m1 := clientmodel.Metric{"n1": "v1"}
m2 := clientmodel.Metric{"n2": "v2"}
m3 := clientmodel.Metric{"n1": "v2"}
p.archiveMetric(1, m1, 2, 4)
p.archiveMetric(2, m2, 1, 6)
p.archiveMetric(3, m3, 5, 5)
expectedFPs := map[clientmodel.Timestamp][]clientmodel.Fingerprint{
0: {},
1: {},
2: {2},
3: {1, 2},
4: {1, 2},
5: {1, 2},
6: {1, 2, 3},
}
for ts, want := range expectedFPs {
got, err := p.getFingerprintsModifiedBefore(ts)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(want, got) {
t.Errorf("timestamp: %v, want FPs %v, got %v", ts, want, got)
}
}
unarchived, firstTime, err := p.unarchiveMetric(1)
if err != nil {
t.Fatal(err)
}
if !unarchived {
t.Fatal("expected actual unarchival")
}
if firstTime != 2 {
t.Errorf("expected first time 2, got %v", firstTime)
}
unarchived, firstTime, err = p.unarchiveMetric(1)
if err != nil {
t.Fatal(err)
}
if unarchived {
t.Fatal("expected no unarchival")
}
expectedFPs = map[clientmodel.Timestamp][]clientmodel.Fingerprint{
0: {},
1: {},
2: {2},
3: {2},
4: {2},
5: {2},
6: {2, 3},
}
for ts, want := range expectedFPs {
got, err := p.getFingerprintsModifiedBefore(ts)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(want, got) {
t.Errorf("timestamp: %v, want FPs %v, got %v", ts, want, got)
}
}
}
func TestDropArchivedMetric(t *testing.T) {
p, closer := newTestPersistence(t)
defer closer.Close()
m1 := clientmodel.Metric{"n1": "v1"}
m2 := clientmodel.Metric{"n2": "v2"}
p.archiveMetric(1, m1, 2, 4)
p.archiveMetric(2, m2, 1, 6)
p.indexMetric(1, m1)
p.indexMetric(2, m2)
p.waitForIndexing()
outFPs, err := p.getFingerprintsForLabelPair(metric.LabelPair{Name: "n1", Value: "v1"})
if err != nil {
t.Fatal(err)
}
want := clientmodel.Fingerprints{1}
if !reflect.DeepEqual(outFPs, want) {
t.Errorf("want %#v, got %#v", want, outFPs)
}
outFPs, err = p.getFingerprintsForLabelPair(metric.LabelPair{Name: "n2", Value: "v2"})
if err != nil {
t.Fatal(err)
}
want = clientmodel.Fingerprints{2}
if !reflect.DeepEqual(outFPs, want) {
t.Errorf("want %#v, got %#v", want, outFPs)
}
if archived, _, _, err := p.hasArchivedMetric(1); err != nil || !archived {
t.Error("want FP 1 archived")
}
if archived, _, _, err := p.hasArchivedMetric(2); err != nil || !archived {
t.Error("want FP 2 archived")
}
if err := p.dropArchivedMetric(1); err != nil {
t.Fatal(err)
}
if err := p.dropArchivedMetric(3); err != nil {
// Dropping something that has not been archived is not an error.
t.Fatal(err)
}
p.waitForIndexing()
outFPs, err = p.getFingerprintsForLabelPair(metric.LabelPair{Name: "n1", Value: "v1"})
if err != nil {
t.Fatal(err)
}
want = nil
if !reflect.DeepEqual(outFPs, want) {
t.Errorf("want %#v, got %#v", want, outFPs)
}
outFPs, err = p.getFingerprintsForLabelPair(metric.LabelPair{Name: "n2", Value: "v2"})
if err != nil {
t.Fatal(err)
}
want = clientmodel.Fingerprints{2}
if !reflect.DeepEqual(outFPs, want) {
t.Errorf("want %#v, got %#v", want, outFPs)
}
if archived, _, _, err := p.hasArchivedMetric(1); err != nil || archived {
t.Error("want FP 1 not archived")
}
if archived, _, _, err := p.hasArchivedMetric(2); err != nil || !archived {
t.Error("want FP 2 archived")
}
}
type incrementalBatch struct {
fpToMetric index.FingerprintMetricMapping
expectedLnToLvs index.LabelNameLabelValuesMapping
expectedLpToFps index.LabelPairFingerprintsMapping
}
func TestIndexing(t *testing.T) {
batches := []incrementalBatch{
{
fpToMetric: index.FingerprintMetricMapping{
0: {
clientmodel.MetricNameLabel: "metric_0",
"label_1": "value_1",
},
1: {
clientmodel.MetricNameLabel: "metric_0",
"label_2": "value_2",
"label_3": "value_3",
},
2: {
clientmodel.MetricNameLabel: "metric_1",
"label_1": "value_2",
},
},
expectedLnToLvs: index.LabelNameLabelValuesMapping{
clientmodel.MetricNameLabel: codable.LabelValueSet{
"metric_0": struct{}{},
"metric_1": struct{}{},
},
"label_1": codable.LabelValueSet{
"value_1": struct{}{},
"value_2": struct{}{},
},
"label_2": codable.LabelValueSet{
"value_2": struct{}{},
},
"label_3": codable.LabelValueSet{
"value_3": struct{}{},
},
},
expectedLpToFps: index.LabelPairFingerprintsMapping{
metric.LabelPair{
Name: clientmodel.MetricNameLabel,
Value: "metric_0",
}: codable.FingerprintSet{0: struct{}{}, 1: struct{}{}},
metric.LabelPair{
Name: clientmodel.MetricNameLabel,
Value: "metric_1",
}: codable.FingerprintSet{2: struct{}{}},
metric.LabelPair{
Name: "label_1",
Value: "value_1",
}: codable.FingerprintSet{0: struct{}{}},
metric.LabelPair{
Name: "label_1",
Value: "value_2",
}: codable.FingerprintSet{2: struct{}{}},
metric.LabelPair{
Name: "label_2",
Value: "value_2",
}: codable.FingerprintSet{1: struct{}{}},
metric.LabelPair{
Name: "label_3",
Value: "value_3",
}: codable.FingerprintSet{1: struct{}{}},
},
}, {
fpToMetric: index.FingerprintMetricMapping{
3: {
clientmodel.MetricNameLabel: "metric_0",
"label_1": "value_3",
},
4: {
clientmodel.MetricNameLabel: "metric_2",
"label_2": "value_2",
"label_3": "value_1",
},
5: {
clientmodel.MetricNameLabel: "metric_1",
"label_1": "value_3",
},
},
expectedLnToLvs: index.LabelNameLabelValuesMapping{
clientmodel.MetricNameLabel: codable.LabelValueSet{
"metric_0": struct{}{},
"metric_1": struct{}{},
"metric_2": struct{}{},
},
"label_1": codable.LabelValueSet{
"value_1": struct{}{},
"value_2": struct{}{},
"value_3": struct{}{},
},
"label_2": codable.LabelValueSet{
"value_2": struct{}{},
},
"label_3": codable.LabelValueSet{
"value_1": struct{}{},
"value_3": struct{}{},
},
},
expectedLpToFps: index.LabelPairFingerprintsMapping{
metric.LabelPair{
Name: clientmodel.MetricNameLabel,
Value: "metric_0",
}: codable.FingerprintSet{0: struct{}{}, 1: struct{}{}, 3: struct{}{}},
metric.LabelPair{
Name: clientmodel.MetricNameLabel,
Value: "metric_1",
}: codable.FingerprintSet{2: struct{}{}, 5: struct{}{}},
metric.LabelPair{
Name: clientmodel.MetricNameLabel,
Value: "metric_2",
}: codable.FingerprintSet{4: struct{}{}},
metric.LabelPair{
Name: "label_1",
Value: "value_1",
}: codable.FingerprintSet{0: struct{}{}},
metric.LabelPair{
Name: "label_1",
Value: "value_2",
}: codable.FingerprintSet{2: struct{}{}},
metric.LabelPair{
Name: "label_1",
Value: "value_3",
}: codable.FingerprintSet{3: struct{}{}, 5: struct{}{}},
metric.LabelPair{
Name: "label_2",
Value: "value_2",
}: codable.FingerprintSet{1: struct{}{}, 4: struct{}{}},
metric.LabelPair{
Name: "label_3",
Value: "value_1",
}: codable.FingerprintSet{4: struct{}{}},
metric.LabelPair{
Name: "label_3",
Value: "value_3",
}: codable.FingerprintSet{1: struct{}{}},
},
},
}
p, closer := newTestPersistence(t)
defer closer.Close()
indexedFpsToMetrics := index.FingerprintMetricMapping{}
for i, b := range batches {
for fp, m := range b.fpToMetric {
p.indexMetric(fp, m)
if err := p.archiveMetric(fp, m, 1, 2); err != nil {
t.Fatal(err)
}
indexedFpsToMetrics[fp] = m
}
verifyIndexedState(i, t, b, indexedFpsToMetrics, p)
}
for i := len(batches) - 1; i >= 0; i-- {
b := batches[i]
verifyIndexedState(i, t, batches[i], indexedFpsToMetrics, p)
for fp, m := range b.fpToMetric {
p.unindexMetric(fp, m)
unarchived, firstTime, err := p.unarchiveMetric(fp)
if err != nil {
t.Fatal(err)
}
if !unarchived {
t.Errorf("%d. metric not unarchived", i)
}
if firstTime != 1 {
t.Errorf("%d. expected firstTime=1, got %v", i, firstTime)
}
delete(indexedFpsToMetrics, fp)
}
}
}
func verifyIndexedState(i int, t *testing.T, b incrementalBatch, indexedFpsToMetrics index.FingerprintMetricMapping, p *persistence) {
p.waitForIndexing()
for fp, m := range indexedFpsToMetrics {
// Compare archived metrics with input metrics.
mOut, err := p.getArchivedMetric(fp)
if err != nil {
t.Fatal(err)
}
if !mOut.Equal(m) {
t.Errorf("%d. %v: Got: %s; want %s", i, fp, mOut, m)
}
// Check that archived metrics are in membership index.
has, first, last, err := p.hasArchivedMetric(fp)
if err != nil {
t.Fatal(err)
}
if !has {
t.Errorf("%d. fingerprint %v not found", i, fp)
}
if first != 1 || last != 2 {
t.Errorf(
"%d. %v: Got first: %d, last %d; want first: %d, last %d",
i, fp, first, last, 1, 2,
)
}
}
// Compare label name -> label values mappings.
for ln, lvs := range b.expectedLnToLvs {
outLvs, err := p.getLabelValuesForLabelName(ln)
if err != nil {
t.Fatal(err)
}
outSet := codable.LabelValueSet{}
for _, lv := range outLvs {
outSet[lv] = struct{}{}
}
if !reflect.DeepEqual(lvs, outSet) {
t.Errorf("%d. label values don't match. Got: %v; want %v", i, outSet, lvs)
}
}
// Compare label pair -> fingerprints mappings.
for lp, fps := range b.expectedLpToFps {
outFPs, err := p.getFingerprintsForLabelPair(lp)
if err != nil {
t.Fatal(err)
}
outSet := codable.FingerprintSet{}
for _, fp := range outFPs {
outSet[fp] = struct{}{}
}
if !reflect.DeepEqual(fps, outSet) {
t.Errorf("%d. %v: fingerprints don't match. Got: %v; want %v", i, lp, outSet, fps)
}
}
}

storage/local/preload.go

@ -0,0 +1,111 @@
// Copyright 2014 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package local
import (
"time"
clientmodel "github.com/prometheus/client_golang/model"
)
// memorySeriesPreloader is a Preloader for the memorySeriesStorage.
type memorySeriesPreloader struct {
storage *memorySeriesStorage
pinnedChunkDescs []*chunkDesc
}
// PreloadRange implements Preloader.
func (p *memorySeriesPreloader) PreloadRange(
fp clientmodel.Fingerprint,
from clientmodel.Timestamp, through clientmodel.Timestamp,
stalenessDelta time.Duration,
) error {
cds, err := p.storage.preloadChunksForRange(fp, from, through, stalenessDelta)
if err != nil {
return err
}
p.pinnedChunkDescs = append(p.pinnedChunkDescs, cds...)
return nil
}
/*
// GetMetricAtTime implements Preloader.
func (p *memorySeriesPreloader) GetMetricAtTime(fp clientmodel.Fingerprint, t clientmodel.Timestamp) error {
cds, err := p.storage.preloadChunks(fp, &timeSelector{
from: t,
through: t,
})
if err != nil {
return err
}
p.pinnedChunkDescs = append(p.pinnedChunkDescs, cds...)
return nil
}
// GetMetricAtInterval implements Preloader.
func (p *memorySeriesPreloader) GetMetricAtInterval(fp clientmodel.Fingerprint, from, through clientmodel.Timestamp, interval time.Duration) error {
cds, err := p.storage.preloadChunks(fp, &timeSelector{
from: from,
through: through,
interval: interval,
})
if err != nil {
return err
}
p.pinnedChunkDescs = append(p.pinnedChunkDescs, cds...)
return
}
// GetMetricRange implements Preloader.
func (p *memorySeriesPreloader) GetMetricRange(fp clientmodel.Fingerprint, t clientmodel.Timestamp, rangeDuration time.Duration) error {
cds, err := p.storage.preloadChunks(fp, &timeSelector{
from: t,
through: t,
rangeDuration: rangeDuration,
})
if err != nil {
return err
}
p.pinnedChunkDescs = append(p.pinnedChunkDescs, cds...)
return nil
}
// GetMetricRangeAtInterval implements Preloader.
func (p *memorySeriesPreloader) GetMetricRangeAtInterval(fp clientmodel.Fingerprint, from, through clientmodel.Timestamp, interval, rangeDuration time.Duration) error {
cds, err := p.storage.preloadChunks(fp, &timeSelector{
from: from,
through: through,
interval: interval,
rangeDuration: rangeDuration,
})
if err != nil {
return err
}
p.pinnedChunkDescs = append(p.pinnedChunkDescs, cds...)
return nil
}
*/
// Close implements Preloader.
func (p *memorySeriesPreloader) Close() {
// TODO: Idea about a primitive but almost free heuristic to not evict
// "recently used" chunks: Do not unpin the chunks right here, but hand
// over the pinnedChunkDescs to a manager that will delay the unpinning
// based on time and memory pressure.
for _, cd := range p.pinnedChunkDescs {
cd.unpin(p.storage.evictRequests)
}
chunkOps.WithLabelValues(unpin).Add(float64(len(p.pinnedChunkDescs)))
}
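The commented-out methods above sketch richer preloading modes that were never wired up; PreloadRange is the one live entry point. For orientation, here is a minimal usage sketch (not part of the original change) of how a caller drives the Preloader. It assumes the metric package is imported alongside the imports above, and the 5-minute staleness delta is an arbitrary illustrative value:

// preloadAndRead is a sketch of the pin/read/unpin life cycle of a Preloader.
func preloadAndRead(s Storage, fp clientmodel.Fingerprint, from, through clientmodel.Timestamp) (metric.Values, error) {
	p := s.NewPreloader()
	// Close unpins every chunk pinned via this preloader.
	defer p.Close()
	if err := p.PreloadRange(fp, from, through, 5*time.Minute); err != nil {
		return nil, err
	}
	// The chunks covering [from, through] are now pinned in memory, so the
	// iterator can read them without hitting persistence.
	it := s.NewIterator(fp)
	return it.GetRangeValues(metric.Interval{
		OldestInclusive: from,
		NewestInclusive: through,
	}), nil
}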

570
storage/local/series.go Normal file
View file

@ -0,0 +1,570 @@
// Copyright 2014 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package local
import (
"math"
"sort"
"sync"
"sync/atomic"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
)
// chunkDescEvictionFactor is a factor used for chunkDesc eviction (as opposed
// to evictions of chunks, see method evictOlderThan). A chunk takes about 20x
// more memory than a chunkDesc. With a chunkDescEvictionFactor of 10, not more
// than a third of the total memory taken by a series will be used for
// chunkDescs (up to 10 chunkDesc units for every 20 chunk units, i.e. 10 of 30).
const chunkDescEvictionFactor = 10
// fingerprintSeriesPair pairs a fingerprint with a memorySeries pointer.
type fingerprintSeriesPair struct {
fp clientmodel.Fingerprint
series *memorySeries
}
// seriesMap maps fingerprints to memory series. All its methods are
// goroutine-safe. A seriesMap is effectively a goroutine-safe version of
// map[clientmodel.Fingerprint]*memorySeries.
type seriesMap struct {
mtx sync.RWMutex
m map[clientmodel.Fingerprint]*memorySeries
}
// newSeriesMap returns a newly allocated empty seriesMap. To create a seriesMap
// based on a prefilled map, use an explicit initializer.
func newSeriesMap() *seriesMap {
return &seriesMap{m: make(map[clientmodel.Fingerprint]*memorySeries)}
}
// length returns the number of mappings in the seriesMap.
func (sm *seriesMap) length() int {
sm.mtx.RLock()
defer sm.mtx.RUnlock()
return len(sm.m)
}
// get returns a memorySeries for a fingerprint. Return values have the same
// semantics as the native Go map.
func (sm *seriesMap) get(fp clientmodel.Fingerprint) (s *memorySeries, ok bool) {
sm.mtx.RLock()
defer sm.mtx.RUnlock()
s, ok = sm.m[fp]
return
}
// put adds a mapping to the seriesMap. It panics if s == nil.
func (sm *seriesMap) put(fp clientmodel.Fingerprint, s *memorySeries) {
sm.mtx.Lock()
defer sm.mtx.Unlock()
if s == nil {
panic("tried to add nil pointer to seriesMap")
}
sm.m[fp] = s
}
// del removes a mapping from the seriesMap.
func (sm *seriesMap) del(fp clientmodel.Fingerprint) {
sm.mtx.Lock()
defer sm.mtx.Unlock()
delete(sm.m, fp)
}
// iter returns a channel that produces all mappings in the seriesMap. The
// channel will be closed once all mappings have been received. Not
// consuming all mappings from the channel will leak a goroutine. The
// semantics of concurrent modification of the seriesMap are similar to those
// of iterating over a map with a 'range' clause. However, if the next element
// in iteration order is removed after the current element has been received
// from the channel, it will still be produced by the channel.
func (sm *seriesMap) iter() <-chan fingerprintSeriesPair {
ch := make(chan fingerprintSeriesPair)
go func() {
sm.mtx.RLock()
for fp, s := range sm.m {
sm.mtx.RUnlock()
ch <- fingerprintSeriesPair{fp, s}
sm.mtx.RLock()
}
sm.mtx.RUnlock()
close(ch)
}()
return ch
}
// fpIter returns a channel that produces all fingerprints in the seriesMap.
// The channel will be closed once all fingerprints have been received. Not
// consuming all fingerprints from the channel will leak a goroutine. The
// semantics of concurrent modification of the seriesMap are similar to those
// of iterating over a map with a 'range' clause. However, if the next element
// in iteration order is removed after the current element has been received
// from the channel, it will still be produced by the channel.
func (sm *seriesMap) fpIter() <-chan clientmodel.Fingerprint {
ch := make(chan clientmodel.Fingerprint)
go func() {
sm.mtx.RLock()
for fp := range sm.m {
sm.mtx.RUnlock()
ch <- fp
sm.mtx.RLock()
}
sm.mtx.RUnlock()
close(ch)
}()
return ch
}
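// drainBoth is a minimal sketch (not part of the original change) showing the
// consumption contract stated above: both channels must be drained to
// completion, or the producing goroutine leaks, blocked forever on its
// unbuffered channel. The handle callback is hypothetical.
func drainBoth(sm *seriesMap, handle func(clientmodel.Fingerprint, *memorySeries)) {
	for pair := range sm.iter() {
		handle(pair.fp, pair.series)
	}
	for fp := range sm.fpIter() {
		_ = fp // Consume every fingerprint, even if unused.
	}
}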
type memorySeries struct {
metric clientmodel.Metric
// Sorted by start time, overlapping chunk ranges are forbidden.
chunkDescs []*chunkDesc
// The chunkDescs in memory might not have all the chunkDescs for the
// chunks that are persisted to disk. The missing chunkDescs are all
// contiguous and at the tail end. chunkDescsOffset is the index of the
// chunk on disk that corresponds to the first chunkDesc in memory. If
// it is 0, the chunkDescs are all loaded. A value of -1 denotes a
// special case: There are chunks on disk, but the offset to the
// chunkDescs in memory is unknown. Also, there is no overlap between
// chunks on disk and chunks in memory (implying that upon first
// persisting of a chunk in memory, the offset has to be set).
chunkDescsOffset int
// The savedFirstTime field is used as a fallback when the
// chunkDescsOffset is not 0. It can be used to save the firstTime of the
// first chunk before its chunk desc is evicted. In doubt, this field is
// just set to the oldest possible timestamp.
savedFirstTime clientmodel.Timestamp
// Whether the current head chunk has already been scheduled to be
// persisted. If true, the current head chunk must not be modified
// anymore.
headChunkPersisted bool
// Whether the current head chunk is used by an iterator. In that case,
// a non-persisted head chunk has to be cloned before more samples are
// appended.
headChunkUsedByIterator bool
}
// newMemorySeries returns a pointer to a newly allocated memorySeries for the
// given metric. reallyNew defines if the memorySeries is a genuinely new series
// or (if false) a series for a metric being unarchived, i.e. a series that
// existed before but has been evicted from memory. If reallyNew is true,
// firstTime is ignored (and set to the lowest possible timestamp instead - it
// will be set properly upon the first eviction of chunkDescs).
func newMemorySeries(m clientmodel.Metric, reallyNew bool, firstTime clientmodel.Timestamp) *memorySeries {
if reallyNew {
firstTime = math.MinInt64
}
s := memorySeries{
metric: m,
headChunkPersisted: !reallyNew,
savedFirstTime: firstTime,
}
if !reallyNew {
s.chunkDescsOffset = -1
}
return &s
}
// add adds a sample pair to the series.
// It returns chunkDescs that must be queued to be persisted.
// The caller must have locked the fingerprint of the series.
func (s *memorySeries) add(fp clientmodel.Fingerprint, v *metric.SamplePair) []*chunkDesc {
if len(s.chunkDescs) == 0 || s.headChunkPersisted {
newHead := newChunkDesc(newDeltaEncodedChunk(d1, d0, true))
s.chunkDescs = append(s.chunkDescs, newHead)
s.headChunkPersisted = false
} else if s.headChunkUsedByIterator && s.head().getRefCount() > 1 {
// We only need to clone the head chunk if the current head
// chunk was used in an iterator at all and if the refCount is
// still greater than the 1 we always have because the head
// chunk is not yet persisted. The latter is just an
// approximation. We will still clone unnecessarily if an older
// iterator using a previous version of the head chunk is still
// around and keeps the head chunk pinned. We would need to track
// pins by version of the head chunk, which is probably not
// worth the effort.
chunkOps.WithLabelValues(clone).Inc()
// No locking needed here because a non-persisted head chunk can
// not get evicted concurrently.
s.head().chunk = s.head().chunk.clone()
s.headChunkUsedByIterator = false
}
chunks := s.head().add(v)
s.head().chunk = chunks[0]
var chunkDescsToPersist []*chunkDesc
if len(chunks) > 1 {
chunkDescsToPersist = append(chunkDescsToPersist, s.head())
for i, c := range chunks[1:] {
cd := newChunkDesc(c)
s.chunkDescs = append(s.chunkDescs, cd)
// The last chunk is still growing.
if i < len(chunks[1:])-1 {
chunkDescsToPersist = append(chunkDescsToPersist, cd)
}
}
}
return chunkDescsToPersist
}
// evictChunkDescs evicts chunkDescs if there are chunkDescEvictionFactor times
// more than non-evicted chunks. iOldestNotEvicted is the index within the
// current chunkDescs of the oldest chunk that is not evicted.
func (s *memorySeries) evictChunkDescs(iOldestNotEvicted int) {
lenToKeep := chunkDescEvictionFactor * (len(s.chunkDescs) - iOldestNotEvicted)
if lenToKeep < len(s.chunkDescs) {
s.savedFirstTime = s.firstTime()
lenEvicted := len(s.chunkDescs) - lenToKeep
s.chunkDescsOffset += lenEvicted
chunkDescOps.WithLabelValues(evict).Add(float64(lenEvicted))
atomic.AddInt64(&numMemChunkDescs, -int64(lenEvicted))
s.chunkDescs = append(
make([]*chunkDesc, 0, lenToKeep),
s.chunkDescs[lenEvicted:]...,
)
}
}
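// Worked example (added for illustration): with len(s.chunkDescs) == 100 and
// iOldestNotEvicted == 95, lenToKeep is 10*(100-95) == 50, so the oldest 50
// chunkDescs are dropped and chunkDescsOffset grows by 50.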
// purgeOlderThan removes chunkDescs older than t. It returns the number of
// purged chunkDescs and whether all chunkDescs have been purged.
//
// The caller must have locked the fingerprint of the series.
func (s *memorySeries) purgeOlderThan(t clientmodel.Timestamp) (int, bool) {
keepIdx := len(s.chunkDescs)
for i, cd := range s.chunkDescs {
if !cd.lastTime().Before(t) {
keepIdx = i
break
}
}
if keepIdx > 0 {
s.chunkDescs = append(make([]*chunkDesc, 0, len(s.chunkDescs)-keepIdx), s.chunkDescs[keepIdx:]...)
atomic.AddInt64(&numMemChunkDescs, -int64(keepIdx))
}
return keepIdx, len(s.chunkDescs) == 0
}
// preloadChunks is an internal helper method.
func (s *memorySeries) preloadChunks(indexes []int, mss *memorySeriesStorage) ([]*chunkDesc, error) {
loadIndexes := []int{}
pinnedChunkDescs := make([]*chunkDesc, 0, len(indexes))
for _, idx := range indexes {
cd := s.chunkDescs[idx]
pinnedChunkDescs = append(pinnedChunkDescs, cd)
cd.pin(mss.evictRequests) // Have to pin everything first to prevent immediate eviction on chunk loading.
if cd.isEvicted() {
loadIndexes = append(loadIndexes, idx)
}
}
chunkOps.WithLabelValues(pin).Add(float64(len(pinnedChunkDescs)))
if len(loadIndexes) > 0 {
if s.chunkDescsOffset == -1 {
panic("requested loading chunks from persistence in a situation where we must not have persisted data for chunk descriptors in memory")
}
fp := s.metric.Fingerprint()
// TODO: Remove law-of-Demeter violation?
chunks, err := mss.persistence.loadChunks(fp, loadIndexes, s.chunkDescsOffset)
if err != nil {
// Unpin the chunks since we won't return them as pinned chunks now.
for _, cd := range pinnedChunkDescs {
cd.unpin(mss.evictRequests)
}
chunkOps.WithLabelValues(unpin).Add(float64(len(pinnedChunkDescs)))
return nil, err
}
for i, c := range chunks {
s.chunkDescs[loadIndexes[i]].setChunk(c)
}
chunkOps.WithLabelValues(load).Add(float64(len(chunks)))
atomic.AddInt64(&numMemChunks, int64(len(chunks)))
}
return pinnedChunkDescs, nil
}
/*
func (s *memorySeries) preloadChunksAtTime(t clientmodel.Timestamp, p *persistence) (chunkDescs, error) {
s.mtx.Lock()
defer s.mtx.Unlock()
if len(s.chunkDescs) == 0 {
return nil, nil
}
var pinIndexes []int
// Find first chunk where lastTime() is after or equal to t.
i := sort.Search(len(s.chunkDescs), func(i int) bool {
return !s.chunkDescs[i].lastTime().Before(t)
})
switch i {
case 0:
pinIndexes = []int{0}
case len(s.chunkDescs):
pinIndexes = []int{i - 1}
default:
if s.chunkDescs[i].contains(t) {
pinIndexes = []int{i}
} else {
pinIndexes = []int{i - 1, i}
}
}
return s.preloadChunks(pinIndexes, p)
}
*/
// preloadChunksForRange loads chunks for the given range from the persistence.
// The caller must have locked the fingerprint of the series.
func (s *memorySeries) preloadChunksForRange(
from clientmodel.Timestamp, through clientmodel.Timestamp,
fp clientmodel.Fingerprint, mss *memorySeriesStorage,
) ([]*chunkDesc, error) {
firstChunkDescTime := clientmodel.Timestamp(math.MaxInt64)
if len(s.chunkDescs) > 0 {
firstChunkDescTime = s.chunkDescs[0].firstTime()
}
if s.chunkDescsOffset != 0 && from.Before(firstChunkDescTime) {
// TODO: Remove law-of-demeter violation?
cds, err := mss.persistence.loadChunkDescs(fp, firstChunkDescTime)
if err != nil {
return nil, err
}
s.chunkDescs = append(cds, s.chunkDescs...)
s.chunkDescsOffset = 0
}
if len(s.chunkDescs) == 0 {
return nil, nil
}
// Find first chunk with start time after "from".
fromIdx := sort.Search(len(s.chunkDescs), func(i int) bool {
return s.chunkDescs[i].firstTime().After(from)
})
// Find first chunk with start time after "through".
throughIdx := sort.Search(len(s.chunkDescs), func(i int) bool {
return s.chunkDescs[i].firstTime().After(through)
})
if fromIdx > 0 {
fromIdx--
}
if throughIdx == len(s.chunkDescs) {
throughIdx--
}
pinIndexes := make([]int, 0, throughIdx-fromIdx+1)
for i := fromIdx; i <= throughIdx; i++ {
pinIndexes = append(pinIndexes, i)
}
return s.preloadChunks(pinIndexes, mss)
}
// newIterator returns a new SeriesIterator. The caller must have locked the
// fingerprint of the memorySeries.
func (s *memorySeries) newIterator(lockFunc, unlockFunc func()) SeriesIterator {
chunks := make([]chunk, 0, len(s.chunkDescs))
for i, cd := range s.chunkDescs {
if !cd.isEvicted() {
if i == len(s.chunkDescs)-1 && !s.headChunkPersisted {
s.headChunkUsedByIterator = true
}
chunks = append(chunks, cd.chunk)
}
}
return &memorySeriesIterator{
lock: lockFunc,
unlock: unlockFunc,
chunks: chunks,
}
}
// head returns a pointer to the head chunk descriptor. The caller must have
// locked the fingerprint of the memorySeries.
func (s *memorySeries) head() *chunkDesc {
return s.chunkDescs[len(s.chunkDescs)-1]
}
// firstTime returns the timestamp of the first sample in the series. The caller
// must have locked the fingerprint of the memorySeries.
func (s *memorySeries) firstTime() clientmodel.Timestamp {
if s.chunkDescsOffset == 0 && len(s.chunkDescs) > 0 {
return s.chunkDescs[0].firstTime()
}
return s.savedFirstTime
}
// lastTime returns the timestamp of the last sample in the series. The caller
// must have locked the fingerprint of the memorySeries.
func (s *memorySeries) lastTime() clientmodel.Timestamp {
return s.head().lastTime()
}
// memorySeriesIterator implements SeriesIterator.
type memorySeriesIterator struct {
lock, unlock func()
chunkIt chunkIterator
chunks []chunk
}
// GetValueAtTime implements SeriesIterator.
func (it *memorySeriesIterator) GetValueAtTime(t clientmodel.Timestamp) metric.Values {
it.lock()
defer it.unlock()
// The most common case. We are iterating through a chunk.
if it.chunkIt != nil && it.chunkIt.contains(t) {
return it.chunkIt.getValueAtTime(t)
}
it.chunkIt = nil
if len(it.chunks) == 0 {
return nil
}
// Before or exactly on the first sample of the series.
if !t.After(it.chunks[0].firstTime()) {
// return first value of first chunk
return it.chunks[0].newIterator().getValueAtTime(t)
}
// After or exactly on the last sample of the series.
if !t.Before(it.chunks[len(it.chunks)-1].lastTime()) {
// return last value of last chunk
return it.chunks[len(it.chunks)-1].newIterator().getValueAtTime(t)
}
// Find first chunk where lastTime() is after or equal to t.
i := sort.Search(len(it.chunks), func(i int) bool {
return !it.chunks[i].lastTime().Before(t)
})
if i == len(it.chunks) {
panic("out of bounds")
}
if t.Before(it.chunks[i].firstTime()) {
// We ended up between two chunks.
return metric.Values{
it.chunks[i-1].newIterator().getValueAtTime(t)[0],
it.chunks[i].newIterator().getValueAtTime(t)[0],
}
}
// We ended up in the middle of a chunk. We might stay there for a while,
// so save it as the current chunk iterator.
it.chunkIt = it.chunks[i].newIterator()
return it.chunkIt.getValueAtTime(t)
}
// GetBoundaryValues implements SeriesIterator.
func (it *memorySeriesIterator) GetBoundaryValues(in metric.Interval) metric.Values {
it.lock()
defer it.unlock()
// Find the first relevant chunk.
i := sort.Search(len(it.chunks), func(i int) bool {
return !it.chunks[i].lastTime().Before(in.OldestInclusive)
})
values := make(metric.Values, 0, 2)
for i, c := range it.chunks[i:] {
var chunkIt chunkIterator
if c.firstTime().After(in.NewestInclusive) {
if len(values) == 1 {
// We found the first value before, but are now
// already past the last value. The value we
// want must be the last value of the previous
// chunk. So backtrack...
chunkIt = it.chunks[i-1].newIterator()
values = append(values, chunkIt.getValueAtTime(in.NewestInclusive)[0])
}
break
}
if len(values) == 0 {
chunkIt = c.newIterator()
firstValues := chunkIt.getValueAtTime(in.OldestInclusive)
switch len(firstValues) {
case 2:
values = append(values, firstValues[1])
case 1:
values = firstValues
default:
panic("unexpected return from getValueAtTime")
}
}
if c.lastTime().After(in.NewestInclusive) {
if chunkIt == nil {
chunkIt = c.newIterator()
}
values = append(values, chunkIt.getValueAtTime(in.NewestInclusive)[0])
break
}
}
if len(values) == 1 {
// We found exactly one value. In that case, add the most recent one we know.
values = append(
values,
it.chunks[len(it.chunks)-1].newIterator().getValueAtTime(in.NewestInclusive)[0],
)
}
if len(values) == 2 && values[0].Equal(&values[1]) {
return values[:1]
}
return values
}
// GetRangeValues implements SeriesIterator.
func (it *memorySeriesIterator) GetRangeValues(in metric.Interval) metric.Values {
it.lock()
defer it.unlock()
// Find the first relevant chunk.
i := sort.Search(len(it.chunks), func(i int) bool {
return !it.chunks[i].lastTime().Before(in.OldestInclusive)
})
values := metric.Values{}
for _, c := range it.chunks[i:] {
if c.firstTime().After(in.NewestInclusive) {
break
}
// TODO: actually reuse an iterator between calls if we get multiple ranges
// from the same chunk.
values = append(values, c.newIterator().getRangeValues(in)...)
}
return values
}
// nopSeriesIterator implements SeriesIterator. It never returns any values.
type nopSeriesIterator struct{}
// GetValueAtTime implements SeriesIterator.
func (_ nopSeriesIterator) GetValueAtTime(t clientmodel.Timestamp) metric.Values {
return metric.Values{}
}
// GetBoundaryValues implements SeriesIterator.
func (_ nopSeriesIterator) GetBoundaryValues(in metric.Interval) metric.Values {
return metric.Values{}
}
// GetRangeValues implements SeriesIterator.
func (_ nopSeriesIterator) GetRangeValues(in metric.Interval) metric.Values {
return metric.Values{}
}

825
storage/local/storage.go Normal file
View file

@ -0,0 +1,825 @@
// Copyright 2014 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package local contains the local time series storage used by Prometheus.
package local
import (
"container/list"
"sync/atomic"
"time"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
)
const (
persistQueueCap = 1024
evictRequestsCap = 1024
chunkLen = 1024
// See waitForNextFP.
fpMaxWaitDuration = 10 * time.Second
fpMinWaitDuration = 5 * time.Millisecond // ~ hard disk seek time.
fpMaxSweepTime = 6 * time.Hour
maxEvictInterval = time.Minute
headChunkTimeout = time.Hour // Close head chunk if not touched for that long.
)
type storageState uint
const (
storageStarting storageState = iota
storageServing
storageStopping
)
type persistRequest struct {
fingerprint clientmodel.Fingerprint
chunkDesc *chunkDesc
}
type evictRequest struct {
cd *chunkDesc
evict bool
}
type memorySeriesStorage struct {
fpLocker *fingerprintLocker
fpToSeries *seriesMap
loopStopping, loopStopped chan struct{}
maxMemoryChunks int
purgeAfter time.Duration
checkpointInterval time.Duration
persistQueue chan persistRequest
persistStopped chan struct{}
persistence *persistence
evictList *list.List
evictRequests chan evictRequest
evictStopping, evictStopped chan struct{}
persistLatency prometheus.Summary
persistErrors *prometheus.CounterVec
persistQueueLength prometheus.Gauge
numSeries prometheus.Gauge
seriesOps *prometheus.CounterVec
ingestedSamplesCount prometheus.Counter
invalidPreloadRequestsCount prometheus.Counter
purgeDuration prometheus.Gauge
}
// MemorySeriesStorageOptions contains options needed by
// NewMemorySeriesStorage. It is not safe to leave any of those at their zero
// values.
type MemorySeriesStorageOptions struct {
MemoryChunks int // How many chunks to keep in memory.
PersistenceStoragePath string // Location of persistence files.
PersistenceRetentionPeriod time.Duration // Chunks at least that old are purged.
CheckpointInterval time.Duration // How often to checkpoint the series map and head chunks.
Dirty bool // Force the storage to consider itself dirty on startup.
}
// NewMemorySeriesStorage returns a newly allocated Storage. Storage.Serve still
// has to be called to start the storage.
func NewMemorySeriesStorage(o *MemorySeriesStorageOptions) (Storage, error) {
p, err := newPersistence(o.PersistenceStoragePath, chunkLen, o.Dirty)
if err != nil {
return nil, err
}
glog.Info("Loading series map and head chunks...")
fpToSeries, err := p.loadSeriesMapAndHeads()
if err != nil {
return nil, err
}
glog.Infof("%d series loaded.", fpToSeries.length())
numSeries := prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "memory_series",
Help: "The current number of series in memory.",
})
numSeries.Set(float64(fpToSeries.length()))
return &memorySeriesStorage{
fpLocker: newFingerprintLocker(256),
fpToSeries: fpToSeries,
loopStopping: make(chan struct{}),
loopStopped: make(chan struct{}),
maxMemoryChunks: o.MemoryChunks,
purgeAfter: o.PersistenceRetentionPeriod,
checkpointInterval: o.CheckpointInterval,
persistQueue: make(chan persistRequest, persistQueueCap),
persistStopped: make(chan struct{}),
persistence: p,
evictList: list.New(),
evictRequests: make(chan evictRequest, evictRequestsCap),
evictStopping: make(chan struct{}),
evictStopped: make(chan struct{}),
persistLatency: prometheus.NewSummary(prometheus.SummaryOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "persist_latency_microseconds",
Help: "A summary of latencies for persisting each chunk.",
}),
persistErrors: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "persist_errors_total",
Help: "A counter of errors persisting chunks.",
},
[]string{"error"},
),
persistQueueLength: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "persist_queue_length",
Help: "The current number of chunks waiting in the persist queue.",
}),
numSeries: numSeries,
seriesOps: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "series_ops_total",
Help: "The total number of series operations by their type.",
},
[]string{opTypeLabel},
),
ingestedSamplesCount: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "ingested_samples_total",
Help: "The total number of samples ingested.",
}),
invalidPreloadRequestsCount: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "invalid_preload_requests_total",
Help: "The total number of preload requests referring to a non-existent series. This is an indication of outdated label indexes.",
}),
}, nil
}
// Start implements Storage.
func (s *memorySeriesStorage) Start() {
go s.handleEvictList()
go s.handlePersistQueue()
go s.loop()
}
// Stop implements Storage.
func (s *memorySeriesStorage) Stop() error {
glog.Info("Stopping local storage...")
glog.Info("Stopping maintenance loop...")
close(s.loopStopping)
<-s.loopStopped
glog.Info("Stopping persist queue...")
close(s.persistQueue)
<-s.persistStopped
glog.Info("Stopping chunk eviction...")
close(s.evictStopping)
<-s.evictStopped
// One final checkpoint of the series map and the head chunks.
if err := s.persistence.checkpointSeriesMapAndHeads(s.fpToSeries, s.fpLocker); err != nil {
return err
}
if err := s.persistence.close(); err != nil {
return err
}
glog.Info("Local storage stopped.")
return nil
}
// WaitForIndexing implements Storage.
func (s *memorySeriesStorage) WaitForIndexing() {
s.persistence.waitForIndexing()
}
// NewIterator implements Storage.
func (s *memorySeriesStorage) NewIterator(fp clientmodel.Fingerprint) SeriesIterator {
s.fpLocker.Lock(fp)
defer s.fpLocker.Unlock(fp)
series, ok := s.fpToSeries.get(fp)
if !ok {
// Oops, no series for fp found. That happens if, after
// preloading is done, the whole series is identified as old
// enough for purging and hence purged for good. As there is no
// data left to iterate over, return an iterator that will never
// return any values.
return nopSeriesIterator{}
}
return series.newIterator(
func() { s.fpLocker.Lock(fp) },
func() { s.fpLocker.Unlock(fp) },
)
}
// NewPreloader implements Storage.
func (s *memorySeriesStorage) NewPreloader() Preloader {
return &memorySeriesPreloader{
storage: s,
}
}
// GetFingerprintsForLabelMatchers implements Storage.
func (s *memorySeriesStorage) GetFingerprintsForLabelMatchers(labelMatchers metric.LabelMatchers) clientmodel.Fingerprints {
var result map[clientmodel.Fingerprint]struct{}
for _, matcher := range labelMatchers {
intersection := map[clientmodel.Fingerprint]struct{}{}
switch matcher.Type {
case metric.Equal:
fps, err := s.persistence.getFingerprintsForLabelPair(
metric.LabelPair{
Name: matcher.Name,
Value: matcher.Value,
},
)
if err != nil {
glog.Error("Error getting fingerprints for label pair: ", err)
}
if len(fps) == 0 {
return nil
}
for _, fp := range fps {
if _, ok := result[fp]; ok || result == nil {
intersection[fp] = struct{}{}
}
}
default:
values, err := s.persistence.getLabelValuesForLabelName(matcher.Name)
if err != nil {
glog.Errorf("Error getting label values for label name %q: %v", matcher.Name, err)
}
matches := matcher.Filter(values)
if len(matches) == 0 {
return nil
}
for _, v := range matches {
fps, err := s.persistence.getFingerprintsForLabelPair(
metric.LabelPair{
Name: matcher.Name,
Value: v,
},
)
if err != nil {
glog.Error("Error getting fingerprints for label pair: ", err)
}
for _, fp := range fps {
if _, ok := result[fp]; ok || result == nil {
intersection[fp] = struct{}{}
}
}
}
}
if len(intersection) == 0 {
return nil
}
result = intersection
}
fps := make(clientmodel.Fingerprints, 0, len(result))
for fp := range result {
fps = append(fps, fp)
}
return fps
}
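// lookupByJob is a minimal sketch (not part of the original change) of driving
// the method above; the label name and value are hypothetical, and the matcher
// is built via its exported fields as they are consumed in this file.
func lookupByJob(s *memorySeriesStorage) {
	matchers := metric.LabelMatchers{
		&metric.LabelMatcher{Type: metric.Equal, Name: "job", Value: "api-server"},
	}
	for _, fp := range s.GetFingerprintsForLabelMatchers(matchers) {
		glog.Infof("%v -> %v", fp, s.GetMetricForFingerprint(fp))
	}
}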
// GetLabelValuesForLabelName implements Storage.
func (s *memorySeriesStorage) GetLabelValuesForLabelName(labelName clientmodel.LabelName) clientmodel.LabelValues {
lvs, err := s.persistence.getLabelValuesForLabelName(labelName)
if err != nil {
glog.Errorf("Error getting label values for label name %q: %v", labelName, err)
}
return lvs
}
// GetMetricForFingerprint implements Storage.
func (s *memorySeriesStorage) GetMetricForFingerprint(fp clientmodel.Fingerprint) clientmodel.Metric {
s.fpLocker.Lock(fp)
defer s.fpLocker.Unlock(fp)
series, ok := s.fpToSeries.get(fp)
if ok {
// Copy required here because caller might mutate the returned
// metric.
m := make(clientmodel.Metric, len(series.metric))
for ln, lv := range series.metric {
m[ln] = lv
}
return m
}
metric, err := s.persistence.getArchivedMetric(fp)
if err != nil {
glog.Errorf("Error retrieving archived metric for fingerprint %v: %v", fp, err)
}
return metric
}
// AppendSamples implements Storage.
func (s *memorySeriesStorage) AppendSamples(samples clientmodel.Samples) {
for _, sample := range samples {
s.appendSample(sample)
}
s.ingestedSamplesCount.Add(float64(len(samples)))
}
func (s *memorySeriesStorage) appendSample(sample *clientmodel.Sample) {
fp := sample.Metric.Fingerprint()
s.fpLocker.Lock(fp)
series := s.getOrCreateSeries(fp, sample.Metric)
chunkDescsToPersist := series.add(fp, &metric.SamplePair{
Value: sample.Value,
Timestamp: sample.Timestamp,
})
s.fpLocker.Unlock(fp)
// Queue only outside of the locked area; processing the persistQueue
// requires the same lock!
for _, cd := range chunkDescsToPersist {
s.persistQueue <- persistRequest{fp, cd}
}
}
func (s *memorySeriesStorage) getOrCreateSeries(fp clientmodel.Fingerprint, m clientmodel.Metric) *memorySeries {
series, ok := s.fpToSeries.get(fp)
if !ok {
unarchived, firstTime, err := s.persistence.unarchiveMetric(fp)
if err != nil {
glog.Errorf("Error unarchiving fingerprint %v: %v", fp, err)
}
if unarchived {
s.seriesOps.WithLabelValues(unarchive).Inc()
} else {
// This was a genuinely new series, so index the metric.
s.persistence.indexMetric(fp, m)
s.seriesOps.WithLabelValues(create).Inc()
}
series = newMemorySeries(m, !unarchived, firstTime)
s.fpToSeries.put(fp, series)
s.numSeries.Inc()
}
return series
}
func (s *memorySeriesStorage) preloadChunksForRange(
fp clientmodel.Fingerprint,
from clientmodel.Timestamp, through clientmodel.Timestamp,
stalenessDelta time.Duration,
) ([]*chunkDesc, error) {
s.fpLocker.Lock(fp)
defer s.fpLocker.Unlock(fp)
series, ok := s.fpToSeries.get(fp)
if !ok {
has, first, last, err := s.persistence.hasArchivedMetric(fp)
if err != nil {
return nil, err
}
if !has {
s.invalidPreloadRequestsCount.Inc()
return nil, nil
}
if from.Add(-stalenessDelta).Before(last) && through.Add(stalenessDelta).After(first) {
metric, err := s.persistence.getArchivedMetric(fp)
if err != nil {
return nil, err
}
series = s.getOrCreateSeries(fp, metric)
} else {
return nil, nil
}
}
return series.preloadChunksForRange(from, through, fp, s)
}
func (s *memorySeriesStorage) handleEvictList() {
ticker := time.NewTicker(maxEvictInterval)
count := 0
loop:
for {
// To batch up evictions a bit, this tries evictions at least
// once per evict interval, but earlier if the number of evict
// requests with evict==true that have happened since the last
// evict run is more than maxMemoryChunks/1000.
select {
case req := <-s.evictRequests:
if req.evict {
req.cd.evictListElement = s.evictList.PushBack(req.cd)
count++
if count > s.maxMemoryChunks/1000 {
s.maybeEvict()
count = 0
}
} else {
if req.cd.evictListElement != nil {
s.evictList.Remove(req.cd.evictListElement)
req.cd.evictListElement = nil
}
}
case <-ticker.C:
if s.evictList.Len() > 0 {
s.maybeEvict()
}
case <-s.evictStopping:
break loop
}
}
ticker.Stop()
glog.Info("Chunk eviction stopped.")
close(s.evictStopped)
}
// maybeEvict is a local helper method. Must only be called by handleEvictList.
func (s *memorySeriesStorage) maybeEvict() {
numChunksToEvict := int(atomic.LoadInt64(&numMemChunks)) - s.maxMemoryChunks
if numChunksToEvict <= 0 {
return
}
chunkDescsToEvict := make([]*chunkDesc, numChunksToEvict)
for i := range chunkDescsToEvict {
e := s.evictList.Front()
if e == nil {
break
}
cd := e.Value.(*chunkDesc)
cd.evictListElement = nil
chunkDescsToEvict[i] = cd
s.evictList.Remove(e)
}
// Do the actual eviction in a goroutine as we might otherwise deadlock,
// in the following way: A chunk was unpinned completely and therefore
// scheduled for eviction. At the time we actually try to evict it,
// another goroutine is pinning the chunk. The pinning goroutine has
// currently locked the chunk and tries to send the evict request (to
// remove the chunk from the evict list) to the evictRequests
// channel. The send blocks because evictRequests is full. However, the
// goroutine that is supposed to empty the channel is waiting for the
// chunkDesc lock to try to evict the chunk.
go func() {
for _, cd := range chunkDescsToEvict {
if cd == nil {
break
}
cd.maybeEvict()
// We don't care if the eviction succeeds. If the chunk
// was pinned in the meantime, it will be added to the
// evict list once it gets unpinned again.
}
}()
}
func (s *memorySeriesStorage) handlePersistQueue() {
for req := range s.persistQueue {
s.persistQueueLength.Set(float64(len(s.persistQueue)))
start := time.Now()
s.fpLocker.Lock(req.fingerprint)
offset, err := s.persistence.persistChunk(req.fingerprint, req.chunkDesc.chunk)
if series, seriesInMemory := s.fpToSeries.get(req.fingerprint); err == nil && seriesInMemory && series.chunkDescsOffset == -1 {
// This is the first chunk persisted for a newly created
// series that had prior chunks on disk. Finally, we can
// set the chunkDescsOffset.
series.chunkDescsOffset = offset
}
s.fpLocker.Unlock(req.fingerprint)
s.persistLatency.Observe(float64(time.Since(start)) / float64(time.Microsecond))
if err != nil {
s.persistErrors.WithLabelValues(err.Error()).Inc()
glog.Error("Error persisting chunk: ", err)
s.persistence.setDirty(true)
continue
}
req.chunkDesc.unpin(s.evictRequests)
chunkOps.WithLabelValues(persistAndUnpin).Inc()
}
glog.Info("Persist queue drained and stopped.")
close(s.persistStopped)
}
// waitForNextFP waits an estimated duration, after which we want to process
// another fingerprint so that we will process all fingerprints in a tenth of
// s.purgeAfter assuming that the system is doing nothing else, e.g. if we want
// to purge after 40h, we want to cycle through all fingerprints within
// 4h. However, the maximum sweep time is capped at fpMaxSweepTime. Furthermore,
// this method will always wait for at least fpMinWaitDuration and never longer
// than fpMaxWaitDuration. If s.loopStopping is closed, it will return false
// immediately. The estimation is based on the total number of fingerprints as
// passed in.
func (s *memorySeriesStorage) waitForNextFP(numberOfFPs int) bool {
d := fpMaxWaitDuration
if numberOfFPs != 0 {
sweepTime := s.purgeAfter / 10
if sweepTime > fpMaxSweepTime {
sweepTime = fpMaxSweepTime
}
d = sweepTime / time.Duration(numberOfFPs)
if d < fpMinWaitDuration {
d = fpMinWaitDuration
}
if d > fpMaxWaitDuration {
d = fpMaxWaitDuration
}
}
t := time.NewTimer(d)
select {
case <-t.C:
return true
case <-s.loopStopping:
return false
}
}
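// Worked example (added for illustration): with purgeAfter == 40h, sweepTime
// is 4h; for 10,000 fingerprints, d == 4h/10000 ≈ 1.44s, which lies within
// the [fpMinWaitDuration, fpMaxWaitDuration] bounds and is used unchanged.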
// cycleThroughMemoryFingerprints returns a channel that emits fingerprints for
// series in memory in a throttled fashion. It continues to cycle through all
// fingerprints in memory until s.loopStopping is closed.
func (s *memorySeriesStorage) cycleThroughMemoryFingerprints() chan clientmodel.Fingerprint {
memoryFingerprints := make(chan clientmodel.Fingerprint)
go func() {
var fpIter <-chan clientmodel.Fingerprint
defer func() {
if fpIter != nil {
for _ = range fpIter {
// Consume the iterator.
}
}
close(memoryFingerprints)
}()
for {
// Initial wait, also important if there are no FPs yet.
if !s.waitForNextFP(s.fpToSeries.length()) {
return
}
begin := time.Now()
fpIter = s.fpToSeries.fpIter()
for fp := range fpIter {
select {
case memoryFingerprints <- fp:
case <-s.loopStopping:
return
}
s.waitForNextFP(s.fpToSeries.length())
}
glog.Infof("Completed maintenance sweep through in-memory fingerprints in %v.", time.Since(begin))
}
}()
return memoryFingerprints
}
// cycleThroughArchivedFingerprints returns a channel that emits fingerprints
// for archived series in a throttled fashion. It continues to cycle through all
// archived fingerprints until s.loopStopping is closed.
func (s *memorySeriesStorage) cycleThroughArchivedFingerprints() chan clientmodel.Fingerprint {
archivedFingerprints := make(chan clientmodel.Fingerprint)
go func() {
defer close(archivedFingerprints)
for {
archivedFPs, err := s.persistence.getFingerprintsModifiedBefore(
clientmodel.TimestampFromTime(time.Now()).Add(-1 * s.purgeAfter),
)
if err != nil {
glog.Error("Failed to lookup archived fingerprint ranges: ", err)
s.waitForNextFP(0)
continue
}
// Initial wait, also important if there are no FPs yet.
if !s.waitForNextFP(len(archivedFPs)) {
return
}
begin := time.Now()
for _, fp := range archivedFPs {
select {
case archivedFingerprints <- fp:
case <-s.loopStopping:
return
}
s.waitForNextFP(len(archivedFPs))
}
glog.Infof("Completed maintenance sweep through archived fingerprints in %v.", time.Since(begin))
}
}()
return archivedFingerprints
}
func (s *memorySeriesStorage) loop() {
checkpointTicker := time.NewTicker(s.checkpointInterval)
defer func() {
checkpointTicker.Stop()
glog.Info("Maintenance loop stopped.")
close(s.loopStopped)
}()
memoryFingerprints := s.cycleThroughMemoryFingerprints()
archivedFingerprints := s.cycleThroughArchivedFingerprints()
loop:
for {
select {
case <-s.loopStopping:
break loop
case <-checkpointTicker.C:
s.persistence.checkpointSeriesMapAndHeads(s.fpToSeries, s.fpLocker)
case fp := <-memoryFingerprints:
s.purgeSeries(fp, clientmodel.TimestampFromTime(time.Now()).Add(-1*s.purgeAfter))
s.maintainSeries(fp)
s.seriesOps.WithLabelValues(memoryMaintenance).Inc()
case fp := <-archivedFingerprints:
s.purgeSeries(fp, clientmodel.TimestampFromTime(time.Now()).Add(-1*s.purgeAfter))
s.seriesOps.WithLabelValues(archiveMaintenance).Inc()
}
}
// Wait until both channels are closed.
for _ = range memoryFingerprints {
}
for _ = range archivedFingerprints {
}
}
// maintainSeries closes the head chunk if not touched in a while. It archives a
// series if all chunks are evicted. It evicts chunkDescs if there are too many.
func (s *memorySeriesStorage) maintainSeries(fp clientmodel.Fingerprint) {
var headChunkToPersist *chunkDesc
s.fpLocker.Lock(fp)
defer func() {
s.fpLocker.Unlock(fp)
// Queue outside of lock!
if headChunkToPersist != nil {
s.persistQueue <- persistRequest{fp, headChunkToPersist}
}
}()
series, ok := s.fpToSeries.get(fp)
if !ok {
return
}
iOldestNotEvicted := -1
for i, cd := range series.chunkDescs {
if !cd.isEvicted() {
iOldestNotEvicted = i
break
}
}
// Archive if all chunks are evicted.
if iOldestNotEvicted == -1 {
s.fpToSeries.del(fp)
s.numSeries.Dec()
if err := s.persistence.archiveMetric(
fp, series.metric, series.firstTime(), series.lastTime(),
); err != nil {
glog.Errorf("Error archiving metric %v: %v", series.metric, err)
} else {
s.seriesOps.WithLabelValues(archive).Inc()
}
return
}
// If we are here, the series is not archived, so check for chunkDesc
// eviction next and then if the head chunk needs to be persisted.
series.evictChunkDescs(iOldestNotEvicted)
if !series.headChunkPersisted && time.Now().Sub(series.head().firstTime().Time()) > headChunkTimeout {
series.headChunkPersisted = true
// Since we cannot modify the head chunk from now on, we
// don't need to bother with cloning anymore.
series.headChunkUsedByIterator = false
headChunkToPersist = series.head()
}
}
// purgeSeries purges chunks older than beforeTime from a series. If the series
// contains no chunks after the purge, it is dropped entirely.
func (s *memorySeriesStorage) purgeSeries(fp clientmodel.Fingerprint, beforeTime clientmodel.Timestamp) {
s.fpLocker.Lock(fp)
defer s.fpLocker.Unlock(fp)
if series, ok := s.fpToSeries.get(fp); ok {
// Deal with series in memory.
if !series.firstTime().Before(beforeTime) {
// Oldest sample not old enough.
return
}
newFirstTime, numDropped, allDropped, err := s.persistence.dropChunks(fp, beforeTime)
if err != nil {
glog.Error("Error purging persisted chunks: ", err)
}
numPurged, allPurged := series.purgeOlderThan(beforeTime)
if allPurged && allDropped {
s.fpToSeries.del(fp)
s.numSeries.Dec()
s.seriesOps.WithLabelValues(memoryPurge).Inc()
s.persistence.unindexMetric(fp, series.metric)
} else if series.chunkDescsOffset != -1 {
series.savedFirstTime = newFirstTime
series.chunkDescsOffset += numPurged - numDropped
if series.chunkDescsOffset < 0 {
panic("dropped more chunks from persistence than from memory")
}
}
return
}
// Deal with archived series.
has, firstTime, lastTime, err := s.persistence.hasArchivedMetric(fp)
if err != nil {
glog.Error("Error looking up archived time range: ", err)
return
}
if !has || !firstTime.Before(beforeTime) {
// Oldest sample not old enough, or metric purged or unarchived in the meantime.
return
}
newFirstTime, _, allDropped, err := s.persistence.dropChunks(fp, beforeTime)
if err != nil {
glog.Error("Error purging persisted chunks: ", err)
}
if allDropped {
if err := s.persistence.dropArchivedMetric(fp); err != nil {
glog.Errorf("Error dropping archived metric for fingerprint %v: %v", fp, err)
return
}
s.seriesOps.WithLabelValues(archivePurge).Inc()
return
}
s.persistence.updateArchivedTimeRange(fp, newFirstTime, lastTime)
}
// To expose persistQueueCap as a metric:
var (
persistQueueCapDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, subsystem, "persist_queue_capacity"),
"The total capacity of the persist queue.",
nil, nil,
)
persistQueueCapGauge = prometheus.MustNewConstMetric(
persistQueueCapDesc, prometheus.GaugeValue, persistQueueCap,
)
)
// Describe implements prometheus.Collector.
func (s *memorySeriesStorage) Describe(ch chan<- *prometheus.Desc) {
s.persistence.Describe(ch)
ch <- s.persistLatency.Desc()
s.persistErrors.Describe(ch)
ch <- s.persistQueueLength.Desc()
ch <- s.numSeries.Desc()
s.seriesOps.Describe(ch)
ch <- s.ingestedSamplesCount.Desc()
ch <- s.invalidPreloadRequestsCount.Desc()
ch <- persistQueueCapDesc
ch <- numMemChunksDesc
ch <- numMemChunkDescsDesc
}
// Collect implements prometheus.Collector.
func (s *memorySeriesStorage) Collect(ch chan<- prometheus.Metric) {
s.persistence.Collect(ch)
ch <- s.persistLatency
s.persistErrors.Collect(ch)
ch <- s.persistQueueLength
ch <- s.numSeries
s.seriesOps.Collect(ch)
ch <- s.ingestedSamplesCount
ch <- s.invalidPreloadRequestsCount
ch <- persistQueueCapGauge
count := atomic.LoadInt64(&numMemChunks)
ch <- prometheus.MustNewConstMetric(numMemChunksDesc, prometheus.GaugeValue, float64(count))
count = atomic.LoadInt64(&numMemChunkDescs)
ch <- prometheus.MustNewConstMetric(numMemChunkDescsDesc, prometheus.GaugeValue, float64(count))
}
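Since memorySeriesStorage implements prometheus.Collector via the Describe and Collect methods above, its metrics become visible once the storage is registered with a Prometheus registry. A minimal sketch (not part of the original change), assuming prometheus.MustRegister is the registration entry point of the client_golang version in use and that the options value o has been populated elsewhere:

s, err := NewMemorySeriesStorage(o)
if err != nil {
	glog.Fatal(err)
}
// The type assertion holds because memorySeriesStorage implements
// both Storage and prometheus.Collector.
prometheus.MustRegister(s.(prometheus.Collector))
s.Start()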

658
storage/local/storage_test.go Normal file
View file

@ -0,0 +1,658 @@
// Copyright 2014 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package local
import (
"fmt"
"math/rand"
"testing"
"testing/quick"
"time"
"github.com/golang/glog"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
"github.com/prometheus/prometheus/utility/test"
)
func TestGetFingerprintsForLabelMatchers(t *testing.T) {
}
// TestLoop is just a smoke test for the loop method, if we can switch it on and
// off without disaster.
func TestLoop(t *testing.T) {
samples := make(clientmodel.Samples, 1000)
for i := range samples {
samples[i] = &clientmodel.Sample{
Timestamp: clientmodel.Timestamp(2 * i),
Value: clientmodel.SampleValue(float64(i) * 0.2),
}
}
directory := test.NewTemporaryDirectory("test_storage", t)
defer directory.Close()
o := &MemorySeriesStorageOptions{
MemoryChunks: 50,
PersistenceRetentionPeriod: 24 * 7 * time.Hour,
PersistenceStoragePath: directory.Path(),
CheckpointInterval: 250 * time.Millisecond,
}
storage, err := NewMemorySeriesStorage(o)
if err != nil {
t.Fatalf("Error creating storage: %s", err)
}
storage.Start()
storage.AppendSamples(samples)
time.Sleep(time.Second)
storage.Stop()
}
func TestChunk(t *testing.T) {
samples := make(clientmodel.Samples, 500000)
for i := range samples {
samples[i] = &clientmodel.Sample{
Timestamp: clientmodel.Timestamp(i),
Value: clientmodel.SampleValue(float64(i) * 0.2),
}
}
s, closer := NewTestStorage(t)
defer closer.Close()
s.AppendSamples(samples)
for m := range s.(*memorySeriesStorage).fpToSeries.iter() {
s.(*memorySeriesStorage).fpLocker.Lock(m.fp)
var values metric.Values
for _, cd := range m.series.chunkDescs {
if cd.isEvicted() {
continue
}
for sample := range cd.chunk.values() {
values = append(values, *sample)
}
}
for i, v := range values {
if samples[i].Timestamp != v.Timestamp {
t.Errorf("%d. Got %v; want %v", i, v.Timestamp, samples[i].Timestamp)
}
if samples[i].Value != v.Value {
t.Errorf("%d. Got %v; want %v", i, v.Value, samples[i].Value)
}
}
s.(*memorySeriesStorage).fpLocker.Unlock(m.fp)
}
glog.Info("test done, closing")
}
func TestGetValueAtTime(t *testing.T) {
samples := make(clientmodel.Samples, 1000)
for i := range samples {
samples[i] = &clientmodel.Sample{
Timestamp: clientmodel.Timestamp(2 * i),
Value: clientmodel.SampleValue(float64(i) * 0.2),
}
}
s, closer := NewTestStorage(t)
defer closer.Close()
s.AppendSamples(samples)
fp := clientmodel.Metric{}.Fingerprint()
it := s.NewIterator(fp)
// #1 Exactly on a sample.
for i, expected := range samples {
actual := it.GetValueAtTime(expected.Timestamp)
if len(actual) != 1 {
t.Fatalf("1.%d. Expected exactly one result, got %d.", i, len(actual))
}
if expected.Timestamp != actual[0].Timestamp {
t.Errorf("1.%d. Got %v; want %v", i, actual[0].Timestamp, expected.Timestamp)
}
if expected.Value != actual[0].Value {
t.Errorf("1.%d. Got %v; want %v", i, actual[0].Value, expected.Value)
}
}
// #2 Between samples.
for i, expected1 := range samples {
if i == len(samples)-1 {
continue
}
expected2 := samples[i+1]
actual := it.GetValueAtTime(expected1.Timestamp + 1)
if len(actual) != 2 {
t.Fatalf("2.%d. Expected exactly 2 results, got %d.", i, len(actual))
}
if expected1.Timestamp != actual[0].Timestamp {
t.Errorf("2.%d. Got %v; want %v", i, actual[0].Timestamp, expected1.Timestamp)
}
if expected1.Value != actual[0].Value {
t.Errorf("2.%d. Got %v; want %v", i, actual[0].Value, expected1.Value)
}
if expected2.Timestamp != actual[1].Timestamp {
t.Errorf("2.%d. Got %v; want %v", i, actual[1].Timestamp, expected2.Timestamp)
}
if expected2.Value != actual[1].Value {
t.Errorf("2.%d. Got %v; want %v", i, actual[1].Value, expected2.Value)
}
}
// #3 Corner cases: Just before the first sample, just after the last.
expected := samples[0]
actual := it.GetValueAtTime(expected.Timestamp - 1)
if len(actual) != 1 {
t.Fatalf("3.1. Expected exactly one result, got %d.", len(actual))
}
if expected.Timestamp != actual[0].Timestamp {
t.Errorf("3.1. Got %v; want %v", actual[0].Timestamp, expected.Timestamp)
}
if expected.Value != actual[0].Value {
t.Errorf("3.1. Got %v; want %v", actual[0].Value, expected.Value)
}
expected = samples[len(samples)-1]
actual = it.GetValueAtTime(expected.Timestamp + 1)
if len(actual) != 1 {
t.Fatalf("3.2. Expected exactly one result, got %d.", len(actual))
}
if expected.Timestamp != actual[0].Timestamp {
t.Errorf("3.2. Got %v; want %v", actual[0].Timestamp, expected.Timestamp)
}
if expected.Value != actual[0].Value {
t.Errorf("3.2. Got %v; want %v", actual[0].Value, expected.Value)
}
}
func TestGetRangeValues(t *testing.T) {
samples := make(clientmodel.Samples, 1000)
for i := range samples {
samples[i] = &clientmodel.Sample{
Timestamp: clientmodel.Timestamp(2 * i),
Value: clientmodel.SampleValue(float64(i) * 0.2),
}
}
s, closer := NewTestStorage(t)
defer closer.Close()
s.AppendSamples(samples)
fp := clientmodel.Metric{}.Fingerprint()
it := s.NewIterator(fp)
// #1 Zero length interval at sample.
for i, expected := range samples {
actual := it.GetRangeValues(metric.Interval{
OldestInclusive: expected.Timestamp,
NewestInclusive: expected.Timestamp,
})
if len(actual) != 1 {
t.Fatalf("1.%d. Expected exactly one result, got %d.", i, len(actual))
}
if expected.Timestamp != actual[0].Timestamp {
t.Errorf("1.%d. Got %v; want %v.", i, actual[0].Timestamp, expected.Timestamp)
}
if expected.Value != actual[0].Value {
t.Errorf("1.%d. Got %v; want %v.", i, actual[0].Value, expected.Value)
}
}
// #2 Zero length interval off sample.
for i, expected := range samples {
actual := it.GetRangeValues(metric.Interval{
OldestInclusive: expected.Timestamp + 1,
NewestInclusive: expected.Timestamp + 1,
})
if len(actual) != 0 {
t.Fatalf("2.%d. Expected no result, got %d.", i, len(actual))
}
}
// #3 2sec interval around sample.
for i, expected := range samples {
actual := it.GetRangeValues(metric.Interval{
OldestInclusive: expected.Timestamp - 1,
NewestInclusive: expected.Timestamp + 1,
})
if len(actual) != 1 {
t.Fatalf("3.%d. Expected exactly one result, got %d.", i, len(actual))
}
if expected.Timestamp != actual[0].Timestamp {
t.Errorf("3.%d. Got %v; want %v.", i, actual[0].Timestamp, expected.Timestamp)
}
if expected.Value != actual[0].Value {
t.Errorf("3.%d. Got %v; want %v.", i, actual[0].Value, expected.Value)
}
}
// #4 2sec interval sample to sample.
for i, expected1 := range samples {
if i == len(samples)-1 {
continue
}
expected2 := samples[i+1]
actual := it.GetRangeValues(metric.Interval{
OldestInclusive: expected1.Timestamp,
NewestInclusive: expected1.Timestamp + 2,
})
if len(actual) != 2 {
t.Fatalf("4.%d. Expected exactly 2 results, got %d.", i, len(actual))
}
if expected1.Timestamp != actual[0].Timestamp {
t.Errorf("4.%d. Got %v for 1st result; want %v.", i, actual[0].Timestamp, expected1.Timestamp)
}
if expected1.Value != actual[0].Value {
t.Errorf("4.%d. Got %v for 1st result; want %v.", i, actual[0].Value, expected1.Value)
}
if expected2.Timestamp != actual[1].Timestamp {
t.Errorf("4.%d. Got %v for 2nd result; want %v.", i, actual[1].Timestamp, expected2.Timestamp)
}
if expected2.Value != actual[1].Value {
t.Errorf("4.%d. Got %v for 2nd result; want %v.", i, actual[1].Value, expected2.Value)
}
}
// #5 corner cases: Interval ends at first sample, interval starts
// at last sample, interval entirely before/after samples.
expected := samples[0]
actual := it.GetRangeValues(metric.Interval{
OldestInclusive: expected.Timestamp - 2,
NewestInclusive: expected.Timestamp,
})
if len(actual) != 1 {
t.Fatalf("5.1. Expected exactly one result, got %d.", len(actual))
}
if expected.Timestamp != actual[0].Timestamp {
t.Errorf("5.1. Got %v; want %v.", actual[0].Timestamp, expected.Timestamp)
}
if expected.Value != actual[0].Value {
t.Errorf("5.1. Got %v; want %v.", actual[0].Value, expected.Value)
}
expected = samples[len(samples)-1]
actual = it.GetRangeValues(metric.Interval{
OldestInclusive: expected.Timestamp,
NewestInclusive: expected.Timestamp + 2,
})
if len(actual) != 1 {
t.Fatalf("5.2. Expected exactly one result, got %d.", len(actual))
}
if expected.Timestamp != actual[0].Timestamp {
t.Errorf("5.2. Got %v; want %v.", actual[0].Timestamp, expected.Timestamp)
}
if expected.Value != actual[0].Value {
t.Errorf("5.2. Got %v; want %v.", actual[0].Value, expected.Value)
}
firstSample := samples[0]
actual = it.GetRangeValues(metric.Interval{
OldestInclusive: firstSample.Timestamp - 4,
NewestInclusive: firstSample.Timestamp - 2,
})
if len(actual) != 0 {
t.Fatalf("5.3. Expected no results, got %d.", len(actual))
}
lastSample := samples[len(samples)-1]
actual = it.GetRangeValues(metric.Interval{
OldestInclusive: lastSample.Timestamp + 2,
NewestInclusive: lastSample.Timestamp + 4,
})
if len(actual) != 0 {
t.Fatalf("5.3. Expected no results, got %d.", len(actual))
}
}
func TestEvictAndPurgeSeries(t *testing.T) {
samples := make(clientmodel.Samples, 1000)
for i := range samples {
samples[i] = &clientmodel.Sample{
Timestamp: clientmodel.Timestamp(2 * i),
Value: clientmodel.SampleValue(float64(i) * 0.2),
}
}
s, closer := NewTestStorage(t)
defer closer.Close()
ms := s.(*memorySeriesStorage) // Going to test the internal purgeSeries method.
s.AppendSamples(samples)
fp := clientmodel.Metric{}.Fingerprint()
// Purge ~half of the chunks.
ms.purgeSeries(fp, 1000)
it := s.NewIterator(fp)
actual := it.GetBoundaryValues(metric.Interval{
OldestInclusive: 0,
NewestInclusive: 10000,
})
if len(actual) != 2 {
t.Fatal("expected two results after purging half of series")
}
if actual[0].Timestamp < 800 || actual[0].Timestamp > 1000 {
t.Errorf("1st timestamp out of expected range: %v", actual[0].Timestamp)
}
want := clientmodel.Timestamp(1998)
if actual[1].Timestamp != want {
t.Errorf("2nd timestamp: want %v, got %v", want, actual[1].Timestamp)
}
// Purge everything.
ms.purgeSeries(fp, 10000)
it = s.NewIterator(fp)
actual = it.GetBoundaryValues(metric.Interval{
OldestInclusive: 0,
NewestInclusive: 10000,
})
if len(actual) != 0 {
t.Fatal("expected zero results after purging the whole series")
}
// Recreate series.
s.AppendSamples(samples)
series, ok := ms.fpToSeries.get(fp)
if !ok {
t.Fatal("could not find series")
}
// Persist head chunk so we can safely archive.
series.headChunkPersisted = true
ms.persistQueue <- persistRequest{fp, series.head()}
time.Sleep(time.Second) // Give time for persisting to happen.
// Archive metrics.
ms.fpToSeries.del(fp)
if err := ms.persistence.archiveMetric(
fp, series.metric, series.firstTime(), series.lastTime(),
); err != nil {
t.Fatal(err)
}
archived, _, _, err := ms.persistence.hasArchivedMetric(fp)
if err != nil {
t.Fatal(err)
}
if !archived {
t.Fatal("not archived")
}
// Purge ~half of the chunks of an archived series.
ms.purgeSeries(fp, 1000)
archived, _, _, err = ms.persistence.hasArchivedMetric(fp)
if err != nil {
t.Fatal(err)
}
if !archived {
t.Fatal("archived series dropped although only half of the chunks purged")
}
// Purge everything.
ms.purgeSeries(fp, 10000)
archived, _, _, err = ms.persistence.hasArchivedMetric(fp)
if err != nil {
t.Fatal(err)
}
if archived {
t.Fatal("archived series not dropped")
}
}
func BenchmarkAppend(b *testing.B) {
samples := make(clientmodel.Samples, b.N)
for i := range samples {
samples[i] = &clientmodel.Sample{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: clientmodel.LabelValue(fmt.Sprintf("test_metric_%d", i%10)),
"label1": clientmodel.LabelValue(fmt.Sprintf("test_metric_%d", i%10)),
"label2": clientmodel.LabelValue(fmt.Sprintf("test_metric_%d", i%10)),
},
Timestamp: clientmodel.Timestamp(i),
Value: clientmodel.SampleValue(i),
}
}
b.ResetTimer()
s, closer := NewTestStorage(b)
defer closer.Close()
s.AppendSamples(samples)
}
// Append a large number of random samples and then check if we can get them out
// of the storage alright.
func TestFuzz(t *testing.T) {
if testing.Short() {
t.Skip("Skipping test in short mode.")
}
check := func(seed int64) bool {
rand.Seed(seed)
s, c := NewTestStorage(t)
defer c.Close()
samples := createRandomSamples()
s.AppendSamples(samples)
return verifyStorage(t, s, samples, 24*7*time.Hour)
}
if err := quick.Check(check, nil); err != nil {
t.Fatal(err)
}
}
// BenchmarkFuzz is the benchmark version of TestFuzz. However, it will run
// several append and verify operations in parallel, if GOMAXPROCS is set
// accordingly. Also, the storage options are set such that evictions,
// checkpoints, and purging will happen concurrently, too. This benchmark will
// have a very long runtime (up to minutes). You can use it as an actual
// benchmark. Run it like this:
//
// go test -cpu 1,2,4,8 -short -bench BenchmarkFuzz -benchmem
//
// You can also use it as a test for races. In that case, run it like this (will
// make things even slower):
//
// go test -race -cpu 8 -short -bench BenchmarkFuzz
func BenchmarkFuzz(b *testing.B) {
b.StopTimer()
rand.Seed(42)
directory := test.NewTemporaryDirectory("test_storage", b)
defer directory.Close()
o := &MemorySeriesStorageOptions{
MemoryChunks: 100,
PersistenceRetentionPeriod: time.Hour,
PersistenceStoragePath: directory.Path(),
CheckpointInterval: 3 * time.Second,
}
s, err := NewMemorySeriesStorage(o)
if err != nil {
b.Fatalf("Error creating storage: %s", err)
}
s.Start()
defer s.Stop()
b.StartTimer()
b.RunParallel(func(pb *testing.PB) {
var allSamples clientmodel.Samples
for pb.Next() {
newSamples := createRandomSamples()
allSamples = append(allSamples, newSamples[:len(newSamples)/2]...)
s.AppendSamples(newSamples[:len(newSamples)/2])
verifyStorage(b, s, allSamples, o.PersistenceRetentionPeriod)
allSamples = append(allSamples, newSamples[len(newSamples)/2:]...)
s.AppendSamples(newSamples[len(newSamples)/2:])
verifyStorage(b, s, allSamples, o.PersistenceRetentionPeriod)
}
})
}
func createRandomSamples() clientmodel.Samples {
type valueCreator func() clientmodel.SampleValue
type deltaApplier func(clientmodel.SampleValue) clientmodel.SampleValue
var (
maxMetrics = 5
maxCycles = 500
maxStreakLength = 500
maxTimeDelta = 1000
maxTimeDeltaFactor = 10
timestamp = clientmodel.Now() - clientmodel.Timestamp(maxTimeDelta*maxTimeDeltaFactor*maxCycles*maxStreakLength/16) // So that some timestamps are in the future.
generators = []struct {
createValue valueCreator
applyDelta []deltaApplier
}{
{ // "Boolean".
createValue: func() clientmodel.SampleValue {
return clientmodel.SampleValue(rand.Intn(2))
},
applyDelta: []deltaApplier{
func(_ clientmodel.SampleValue) clientmodel.SampleValue {
return clientmodel.SampleValue(rand.Intn(2))
},
},
},
{ // Integer with int deltas of various byte length.
createValue: func() clientmodel.SampleValue {
return clientmodel.SampleValue(rand.Int63() - 1<<62)
},
applyDelta: []deltaApplier{
func(v clientmodel.SampleValue) clientmodel.SampleValue {
return clientmodel.SampleValue(rand.Intn(1<<8) - 1<<7 + int(v))
},
func(v clientmodel.SampleValue) clientmodel.SampleValue {
return clientmodel.SampleValue(rand.Intn(1<<16) - 1<<15 + int(v))
},
func(v clientmodel.SampleValue) clientmodel.SampleValue {
return clientmodel.SampleValue(rand.Intn(1<<32) - 1<<31 + int(v))
},
},
},
{ // Float with float32 and float64 deltas.
createValue: func() clientmodel.SampleValue {
return clientmodel.SampleValue(rand.NormFloat64())
},
applyDelta: []deltaApplier{
func(v clientmodel.SampleValue) clientmodel.SampleValue {
return v + clientmodel.SampleValue(float32(rand.NormFloat64()))
},
func(v clientmodel.SampleValue) clientmodel.SampleValue {
return v + clientmodel.SampleValue(rand.NormFloat64())
},
},
},
}
)
result := clientmodel.Samples{}
metrics := []clientmodel.Metric{}
for n := rand.Intn(maxMetrics); n >= 0; n-- {
metrics = append(metrics, clientmodel.Metric{
clientmodel.LabelName(fmt.Sprintf("labelname_%d", n+1)): clientmodel.LabelValue(fmt.Sprintf("labelvalue_%d", rand.Int())),
})
}
for n := rand.Intn(maxCycles); n >= 0; n-- {
// Pick a metric for this cycle.
metric := metrics[rand.Intn(len(metrics))]
timeDelta := rand.Intn(maxTimeDelta) + 1
generator := generators[rand.Intn(len(generators))]
createValue := generator.createValue
applyDelta := generator.applyDelta[rand.Intn(len(generator.applyDelta))]
incTimestamp := func() { timestamp += clientmodel.Timestamp(timeDelta * (rand.Intn(maxTimeDeltaFactor) + 1)) }
switch rand.Intn(4) {
case 0: // A single sample.
result = append(result, &clientmodel.Sample{
Metric: metric,
Value: createValue(),
Timestamp: timestamp,
})
incTimestamp()
case 1: // A streak of random sample values.
for n := rand.Intn(maxStreakLength); n >= 0; n-- {
result = append(result, &clientmodel.Sample{
Metric: metric,
Value: createValue(),
Timestamp: timestamp,
})
incTimestamp()
}
case 2: // A streak of sample values with incremental changes.
value := createValue()
for n := rand.Intn(maxStreakLength); n >= 0; n-- {
result = append(result, &clientmodel.Sample{
Metric: metric,
Value: value,
Timestamp: timestamp,
})
incTimestamp()
value = applyDelta(value)
}
case 3: // A streak of constant sample values.
value := createValue()
for n := rand.Intn(maxStreakLength); n >= 0; n-- {
result = append(result, &clientmodel.Sample{
Metric: metric,
Value: value,
Timestamp: timestamp,
})
incTimestamp()
}
}
}
return result
}
func verifyStorage(t testing.TB, s Storage, samples clientmodel.Samples, maxAge time.Duration) bool {
result := true
for _, i := range rand.Perm(len(samples)) {
sample := samples[i]
if sample.Timestamp.Before(clientmodel.TimestampFromTime(time.Now().Add(-maxAge))) {
// TODO: Once we have a guaranteed cutoff at the
// retention period, we can verify here that no results
// are returned.
continue
}
fp := sample.Metric.Fingerprint()
p := s.NewPreloader()
p.PreloadRange(fp, sample.Timestamp, sample.Timestamp, time.Hour)
found := s.NewIterator(fp).GetValueAtTime(sample.Timestamp)
if len(found) != 1 {
t.Errorf("Sample %#v: Expected exactly one value, found %d.", sample, len(found))
result = false
p.Close()
continue
}
want := float64(sample.Value)
got := float64(found[0].Value)
if want != got || sample.Timestamp != found[0].Timestamp {
t.Errorf(
"Value (or timestamp) mismatch, want %f (at time %v), got %f (at time %v).",
want, sample.Timestamp, got, found[0].Timestamp,
)
result = false
}
p.Close()
}
return result
}


@ -0,0 +1,58 @@
// Copyright 2014 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package local
import (
"testing"
"time"
"github.com/prometheus/prometheus/utility/test"
)
type testStorageCloser struct {
storage Storage
directory test.Closer
}
func (t *testStorageCloser) Close() {
t.storage.Stop()
t.directory.Close()
}
// NewTestStorage creates a storage instance backed by files in a temporary
// directory. The returned storage is already in serving state. Upon closing the
// returned test.Closer, the temporary directory is cleaned up.
func NewTestStorage(t testing.TB) (Storage, test.Closer) {
directory := test.NewTemporaryDirectory("test_storage", t)
o := &MemorySeriesStorageOptions{
MemoryChunks: 1000000,
PersistenceRetentionPeriod: 24 * 7 * time.Hour,
PersistenceStoragePath: directory.Path(),
CheckpointInterval: time.Hour,
}
storage, err := NewMemorySeriesStorage(o)
if err != nil {
directory.Close()
t.Fatalf("Error creating storage: %s", err)
}
storage.Start()
closer := &testStorageCloser{
storage: storage,
directory: directory,
}
return storage, closer
}
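// Illustrative only, not part of this change: a minimal sketch of how a test
// in this package might use NewTestStorage. The metric name and sample are
// hypothetical placeholders.
//
//	func TestExample(t *testing.T) {
//		s, closer := NewTestStorage(t)
//		defer closer.Close()
//		s.AppendSamples(clientmodel.Samples{
//			&clientmodel.Sample{
//				Metric:    clientmodel.Metric{clientmodel.MetricNameLabel: "example_metric"},
//				Timestamp: clientmodel.Now(),
//				Value:     1,
//			},
//		})
//	}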


@ -1,142 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import (
"time"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/stats"
)
// Persistence is a system for storing metric samples in a persistence
// layer.
type Persistence interface {
// A storage system may rely on external resources and thus should be
// closed when finished.
Close()
// Record a group of new samples in the storage layer. Multiple samples for
// the same fingerprint need to be submitted in chronological order, from
// oldest to newest (both in the same call to AppendSamples and across
// multiple calls).
AppendSamples(clientmodel.Samples) error
// Get all of the metric fingerprints that are associated with the
// provided label matchers.
GetFingerprintsForLabelMatchers(LabelMatchers) (clientmodel.Fingerprints, error)
// Get all of the label values that are associated with a given label name.
GetLabelValuesForLabelName(clientmodel.LabelName) (clientmodel.LabelValues, error)
// Get the metric associated with the provided fingerprint.
GetMetricForFingerprint(*clientmodel.Fingerprint) (clientmodel.Metric, error)
// Get all label values that are associated with a given label name.
GetAllValuesForLabel(clientmodel.LabelName) (clientmodel.LabelValues, error)
}
// PreloadingPersistence is a Persistence which supports building
// preloaded views.
type PreloadingPersistence interface {
Persistence
// NewViewRequestBuilder furnishes a ViewRequestBuilder for recording
// which types of queries to perform.
NewViewRequestBuilder() ViewRequestBuilder
}
// View provides a view of the values in the datastore subject to the request
// of a preloading operation.
type View interface {
// Get the two values that are immediately adjacent to a given time.
GetValueAtTime(*clientmodel.Fingerprint, clientmodel.Timestamp) Values
// Get the boundary values of an interval: the first value older than
// the interval start, and the first value younger than the interval
// end.
GetBoundaryValues(*clientmodel.Fingerprint, Interval) Values
// Get all values contained within a provided interval.
GetRangeValues(*clientmodel.Fingerprint, Interval) Values
}
// ViewablePersistence is a Persistence that is able to present the
// samples it has stored as a View.
type ViewablePersistence interface {
Persistence
View
}
// ViewRequestBuilder represents the summation of all datastore queries that
// shall be performed to extract values. Call the Get... methods to record the
// queries. Once done, use HasOp and PopOp to retrieve the resulting
// operations. The operations are sorted by their fingerprint (and, for equal
// fingerprints, by the StartsAt timestamp of their operation).
type ViewRequestBuilder interface {
// GetMetricAtTime records a query to get, for the given Fingerprint,
// either the value at that time if there is a match or the one or two
// values adjacent thereto.
GetMetricAtTime(fingerprint *clientmodel.Fingerprint, time clientmodel.Timestamp)
// GetMetricAtInterval records a query to get, for the given
// Fingerprint, the value at each interval point from From through
// Through if there is a match, or the one or two values adjacent to
// each point.
GetMetricAtInterval(fingerprint *clientmodel.Fingerprint, from, through clientmodel.Timestamp, interval time.Duration)
// GetMetricRange records a query to get, for the given Fingerprint, the
// values that occur inclusively from From through Through.
GetMetricRange(fingerprint *clientmodel.Fingerprint, from, through clientmodel.Timestamp)
// GetMetricRangeAtInterval records a query to get value ranges at
// intervals for the given Fingerprint:
//
// |----| |----| |----| |----|
// ^ ^ ^ ^ ^ ^
// | \------------/ \----/ |
// from interval rangeDuration through
GetMetricRangeAtInterval(fp *clientmodel.Fingerprint, from, through clientmodel.Timestamp, interval, rangeDuration time.Duration)
// Execute materializes a View, subject to a timeout.
Execute(deadline time.Duration, queryStats *stats.TimerGroup) (View, error)
// PopOp emits the next operation in the queue (sorted by
// fingerprint). If called while HasOp returns false, the
// behavior is undefined.
PopOp() Op
// HasOp returns true if there is at least one more operation in the
// queue.
HasOp() bool
}
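// Illustrative only: a hedged sketch of the record-then-execute protocol
// described above. All variable names (persistence, fp, from, through,
// queryStats) are hypothetical placeholders.
//
//	builder := persistence.NewViewRequestBuilder()
//	builder.GetMetricAtTime(fp, from)
//	builder.GetMetricRange(fp, from, through)
//	view, err := builder.Execute(30*time.Second, queryStats)
//
// The execution machinery can instead drain the recorded,
// fingerprint-sorted operations directly:
//
//	for builder.HasOp() {
//		op := builder.PopOp()
//		// feed op.ExtractSamples with loaded values...
//	}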
// Op encapsulates a primitive query operation.
type Op interface {
// Fingerprint returns the fingerprint of the metric this operation
// operates on.
Fingerprint() *clientmodel.Fingerprint
// ExtractSamples extracts samples from a stream of values and advances
// the operation time.
ExtractSamples(Values) Values
// Consumed returns whether the operator has consumed all data it needs.
Consumed() bool
// CurrentTime gets the current operation time. In a newly created op,
// this is the starting time of the operation. During ongoing execution
// of the op, the current time is advanced accordingly. Once no
// subsequent work associated with the operation remains, nil is
// returned.
CurrentTime() clientmodel.Timestamp
}
// CurationState contains high-level curation state information for the
// heads-up-display.
type CurationState struct {
Active bool
Name string
Limit time.Duration
Fingerprint *clientmodel.Fingerprint
}


@ -14,16 +14,14 @@
package metric
import (
"bytes"
"fmt"
"sort"
clientmodel "github.com/prometheus/client_golang/model"
)
// MarshalJSON implements json.Marshaler.
func (s SamplePair) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf("{\"Value\": \"%f\", \"Timestamp\": %d}", s.Value, s.Timestamp)), nil
return []byte(fmt.Sprintf("{\"Value\": \"%f\", \"Timestamp\": %s}", s.Value, s.Timestamp.String())), nil
}
// SamplePair pairs a SampleValue with a Timestamp.
@ -46,96 +44,9 @@ func (s *SamplePair) String() string {
return fmt.Sprintf("SamplePair at %s of %s", s.Timestamp, s.Value)
}
// Values is a sortable slice of SamplePairs (as in: it implements
// sort.Interface). Sorting happens by Timestamp.
// Values is a slice of SamplePairs.
type Values []SamplePair
// Len implements sort.Interface.
func (v Values) Len() int {
return len(v)
}
// Less implements sort.Interface.
func (v Values) Less(i, j int) bool {
return v[i].Timestamp.Before(v[j].Timestamp)
}
// Swap implements sort.Interface.
func (v Values) Swap(i, j int) {
v[i], v[j] = v[j], v[i]
}
// Equal returns true if these Values are of the same length as o, and each
// value is equal to the corresponding value in o (i.e. at the same index).
func (v Values) Equal(o Values) bool {
if len(v) != len(o) {
return false
}
for i, expected := range v {
if !expected.Equal(&o[i]) {
return false
}
}
return true
}
// FirstTimeAfter indicates whether the first sample of a set is after a given
// timestamp.
func (v Values) FirstTimeAfter(t clientmodel.Timestamp) bool {
return v[0].Timestamp.After(t)
}
// LastTimeBefore indicates whether the last sample of a set is before a given
// timestamp.
func (v Values) LastTimeBefore(t clientmodel.Timestamp) bool {
return v[len(v)-1].Timestamp.Before(t)
}
// InsideInterval indicates whether a given range of sorted values could contain
// a value for a given time.
func (v Values) InsideInterval(t clientmodel.Timestamp) bool {
switch {
case v.Len() == 0:
return false
case t.Before(v[0].Timestamp):
return false
case !v[v.Len()-1].Timestamp.Before(t):
return false
default:
return true
}
}
// TruncateBefore returns a subslice of the original such that extraneous
// samples in the collection that occur before the provided time are
// dropped. The original slice is not mutated.
func (v Values) TruncateBefore(t clientmodel.Timestamp) Values {
index := sort.Search(len(v), func(i int) bool {
timestamp := v[i].Timestamp
return !timestamp.Before(t)
})
return v[index:]
}
func (v Values) String() string {
buffer := bytes.Buffer{}
fmt.Fprintf(&buffer, "[")
for i, value := range v {
fmt.Fprintf(&buffer, "%d. %s", i, value)
if i != len(v)-1 {
fmt.Fprintf(&buffer, "\n")
}
}
fmt.Fprintf(&buffer, "]")
return buffer.String()
}
// SampleSet is Values with a Metric attached.
type SampleSet struct {
Metric clientmodel.Metric


@ -1,267 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"flag"
"fmt"
"sort"
"testing"
"time"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/storage/metric"
clientmodel "github.com/prometheus/client_golang/model"
)
type nopCurationStateUpdater struct{}
func (n *nopCurationStateUpdater) UpdateCurationState(*metric.CurationState) {}
func generateTestSamples(endTime clientmodel.Timestamp, numTs int, samplesPerTs int, interval time.Duration) clientmodel.Samples {
samples := make(clientmodel.Samples, 0, numTs*samplesPerTs)
startTime := endTime.Add(-interval * time.Duration(samplesPerTs-1))
for ts := 0; ts < numTs; ts++ {
metric := clientmodel.Metric{}
metric[clientmodel.MetricNameLabel] = clientmodel.LabelValue(fmt.Sprintf("metric_%d", ts))
for i := 0; i < samplesPerTs; i++ {
sample := &clientmodel.Sample{
Metric: metric,
Value: clientmodel.SampleValue(ts + 1000*i),
Timestamp: startTime.Add(interval * time.Duration(i)),
}
samples = append(samples, sample)
}
}
sort.Sort(samples)
return samples
}
type compactionChecker struct {
t *testing.T
sampleIdx int
numChunks int
expectedSamples clientmodel.Samples
}
func (c *compactionChecker) Operate(key, value interface{}) *storage.OperatorError {
c.numChunks++
sampleKey := key.(*SampleKey)
if sampleKey.FirstTimestamp.After(sampleKey.LastTimestamp) {
c.t.Fatalf("Chunk FirstTimestamp (%v) is after LastTimestamp (%v): %v", sampleKey.FirstTimestamp.Unix(), sampleKey.LastTimestamp.Unix(), sampleKey)
}
fp := &clientmodel.Fingerprint{}
for _, sample := range value.(metric.Values) {
if sample.Timestamp.Before(sampleKey.FirstTimestamp) || sample.Timestamp.After(sampleKey.LastTimestamp) {
c.t.Fatalf("Sample not within chunk boundaries: chunk FirstTimestamp (%v), chunk LastTimestamp (%v) vs. sample Timestamp (%v)", sampleKey.FirstTimestamp.Unix(), sampleKey.LastTimestamp.Unix(), sample.Timestamp)
}
expected := c.expectedSamples[c.sampleIdx]
fp.LoadFromMetric(expected.Metric)
if !sampleKey.Fingerprint.Equal(fp) {
c.t.Fatalf("%d. Expected fingerprint %s, got %s", c.sampleIdx, fp, sampleKey.Fingerprint)
}
sp := &metric.SamplePair{
Value: expected.Value,
Timestamp: expected.Timestamp,
}
if !sample.Equal(sp) {
c.t.Fatalf("%d. Expected sample %s, got %s", c.sampleIdx, sp, sample)
}
c.sampleIdx++
}
return nil
}
func checkStorageSaneAndEquivalent(t *testing.T, name string, ts *TieredStorage, samples clientmodel.Samples, expectedNumChunks int) {
cc := &compactionChecker{
expectedSamples: samples,
t: t,
}
entire, err := ts.DiskStorage.MetricSamples.ForEach(&MetricSamplesDecoder{}, &AcceptAllFilter{}, cc)
if err != nil {
t.Fatalf("%s: Error checking samples: %s", name, err)
}
if !entire {
t.Fatalf("%s: Didn't scan entire corpus", name)
}
if cc.numChunks != expectedNumChunks {
t.Fatalf("%s: Expected %d chunks, got %d", name, expectedNumChunks, cc.numChunks)
}
}
type compactionTestScenario struct {
leveldbChunkSize int
numTimeseries int
samplesPerTs int
ignoreYoungerThan time.Duration
maximumMutationPoolBatch int
minimumGroupSize int
uncompactedChunks int
compactedChunks int
}
func (s compactionTestScenario) test(t *testing.T) {
defer flag.Set("leveldbChunkSize", flag.Lookup("leveldbChunkSize").Value.String())
flag.Set("leveldbChunkSize", fmt.Sprintf("%d", s.leveldbChunkSize))
ts, closer := NewTestTieredStorage(t)
defer closer.Close()
// 1. Store test values.
samples := generateTestSamples(testInstant, s.numTimeseries, s.samplesPerTs, time.Minute)
ts.AppendSamples(samples)
ts.Flush()
// 2. Check sanity of uncompacted values.
checkStorageSaneAndEquivalent(t, "Before compaction", ts, samples, s.uncompactedChunks)
// 3. Compact test storage.
processor := NewCompactionProcessor(&CompactionProcessorOptions{
MaximumMutationPoolBatch: s.maximumMutationPoolBatch,
MinimumGroupSize: s.minimumGroupSize,
})
defer processor.Close()
curator := NewCurator(&CuratorOptions{
Stop: make(chan struct{}),
ViewQueue: ts.ViewQueue,
})
defer curator.Close()
err := curator.Run(s.ignoreYoungerThan, testInstant, processor, ts.DiskStorage.CurationRemarks, ts.DiskStorage.MetricSamples, ts.DiskStorage.MetricHighWatermarks, &nopCurationStateUpdater{})
if err != nil {
t.Fatalf("Failed to run curator: %s", err)
}
// 4. Check sanity of compacted values.
checkStorageSaneAndEquivalent(t, "After compaction", ts, samples, s.compactedChunks)
}
func TestCompaction(t *testing.T) {
scenarios := []compactionTestScenario{
// BEFORE COMPACTION:
//
// Chunk size | Fingerprint | Samples
// 5 | A | 1 .. 5
// 5 | A | 6 .. 10
// 5 | A | 11 .. 15
// 5 | B | 1 .. 5
// 5 | B | 6 .. 10
// 5 | B | 11 .. 15
// 5 | C | 1 .. 5
// 5 | C | 6 .. 10
// 5 | C | 11 .. 15
//
// AFTER COMPACTION:
//
// Chunk size | Fingerprint | Samples
// 10 | A | 1 .. 10
// 5 | A | 11 .. 15
// 10 | B | 1 .. 10
// 5 | B | 11 .. 15
// 10 | C | 1 .. 10
// 5 | C | 11 .. 15
{
leveldbChunkSize: 5,
numTimeseries: 3,
samplesPerTs: 15,
ignoreYoungerThan: time.Minute,
maximumMutationPoolBatch: 30,
minimumGroupSize: 10,
uncompactedChunks: 9,
compactedChunks: 6,
},
// BEFORE COMPACTION:
//
// Chunk size | Fingerprint | Samples
// 5 | A | 1 .. 5
// 5 | A | 6 .. 10
// 5 | A | 11 .. 15
// 5 | B | 1 .. 5
// 5 | B | 6 .. 10
// 5 | B | 11 .. 15
// 5 | C | 1 .. 5
// 5 | C | 6 .. 10
// 5 | C | 11 .. 15
//
// AFTER COMPACTION:
//
// Chunk size | Fingerprint | Samples
// 10 | A | 1 .. 15
// 10 | B | 1 .. 15
// 10 | C | 1 .. 15
{
leveldbChunkSize: 5,
numTimeseries: 3,
samplesPerTs: 15,
ignoreYoungerThan: time.Minute,
maximumMutationPoolBatch: 30,
minimumGroupSize: 30,
uncompactedChunks: 9,
compactedChunks: 3,
},
// BEFORE COMPACTION:
//
// Chunk size | Fingerprint | Samples
// 5 | A | 1 .. 5
// 5 | A | 6 .. 10
// 5 | A | 11 .. 15
// 5 | A | 16 .. 20
// 5 | B | 1 .. 5
// 5 | B | 6 .. 10
// 5 | B | 11 .. 15
// 5 | B | 16 .. 20
// 5 | C | 1 .. 5
// 5 | C | 6 .. 10
// 5 | C | 11 .. 15
// 5 | C | 16 .. 20
//
// AFTER COMPACTION:
//
// Chunk size | Fingerprint | Samples
// 10 | A | 1 .. 15
// 10 | A | 16 .. 20
// 10 | B | 1 .. 15
// 10 | B | 16 .. 20
// 10 | C | 1 .. 15
// 10 | C | 16 .. 20
{
leveldbChunkSize: 5,
numTimeseries: 3,
samplesPerTs: 20,
ignoreYoungerThan: time.Minute,
maximumMutationPoolBatch: 30,
minimumGroupSize: 10,
uncompactedChunks: 12,
compactedChunks: 6,
},
}
for _, s := range scenarios {
s.test(t)
}
}


@ -1,509 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"bytes"
"errors"
"fmt"
"strings"
"time"
"code.google.com/p/goprotobuf/proto"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/storage/metric"
"github.com/prometheus/prometheus/storage/raw"
"github.com/prometheus/prometheus/storage/raw/leveldb"
dto "github.com/prometheus/prometheus/model/generated"
)
const curationYieldPeriod = 250 * time.Millisecond
var errIllegalIterator = errors.New("iterator invalid")
// Constants for instrumentation.
const (
cutOff = "recency_threshold"
processorName = "processor"
)
var (
curationDurations = prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Namespace: namespace,
Name: "curation_durations_milliseconds",
Help: "Histogram of time spent in curation.",
Objectives: []float64{0.01, 0.05, 0.5, 0.90, 0.99},
},
[]string{cutOff, processorName, result},
)
curationFilterOperations = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Name: "curation_filter_operations_total",
Help: "The number of curation filter operations completed.",
},
[]string{cutOff, processorName, result},
)
)
func init() {
prometheus.MustRegister(curationDurations)
prometheus.MustRegister(curationFilterOperations)
}
// CurationStateUpdater receives updates about the curation state.
type CurationStateUpdater interface {
UpdateCurationState(*metric.CurationState)
}
// CuratorOptions bundles the parameters needed to create a Curator.
type CuratorOptions struct {
Stop chan struct{}
ViewQueue chan viewJob
}
// Curator is responsible for carrying out a given curation policy across the
// stored samples on-disk. This is useful for compacting sparse sample values
// into single sample entities to reduce keyspace load on the datastore.
type Curator struct {
stop chan struct{}
viewQueue chan viewJob
dtoSampleKeys *dtoSampleKeyList
sampleKeys *sampleKeyList
}
// NewCurator returns an initialized Curator.
func NewCurator(o *CuratorOptions) *Curator {
return &Curator{
stop: o.Stop,
viewQueue: o.ViewQueue,
dtoSampleKeys: newDtoSampleKeyList(10),
sampleKeys: newSampleKeyList(10),
}
}
// watermarkScanner converts (dto.Fingerprint, dto.MetricHighWatermark) pairs
// into (model.Fingerprint, model.Watermark) pairs.
//
// watermarkScanner determines whether to include or exclude candidate
// values from the curation process by virtue of how old the high watermark is.
//
// watermarkScanner scans over the curator.samples table for metrics whose
// high watermark has been determined to be allowable for curation. This type
// is individually responsible for compaction.
//
// The scanning starts from CurationRemark.LastCompletionTimestamp and goes
// forward until the stop point or end of the series is reached.
type watermarkScanner struct {
// curationState is the data store for curation remarks.
curationState CurationRemarker
// ignoreYoungerThan is passed into the curation remark for the given series.
ignoreYoungerThan time.Duration
// processor is responsible for executing a given strategy on the
// to-be-operated-on series.
processor Processor
// sampleIterator is a snapshotted iterator for the time series.
sampleIterator leveldb.Iterator
// samples is the on-disk store of samples being curated.
samples raw.Persistence
// stopAt is a cue for when to stop mutating a given series.
stopAt clientmodel.Timestamp
// stop functions as the global stop channel for all future operations.
stop chan struct{}
// status is the outbound channel for notifying the status page of its state.
status CurationStateUpdater
firstBlock, lastBlock *SampleKey
ViewQueue chan viewJob
dtoSampleKeys *dtoSampleKeyList
sampleKeys *sampleKeyList
}
// Run facilitates the curation lifecycle.
//
// ignoreYoungerThan is the recency threshold: values younger than it will
// not be curated.
// curationState is the on-disk store where the curation remarks are made for
// how much progress has been made.
func (c *Curator) Run(ignoreYoungerThan time.Duration, instant clientmodel.Timestamp, processor Processor, curationState CurationRemarker, samples *leveldb.LevelDBPersistence, watermarks HighWatermarker, status CurationStateUpdater) (err error) {
defer func(t time.Time) {
duration := float64(time.Since(t) / time.Millisecond)
labels := prometheus.Labels{
cutOff: fmt.Sprint(ignoreYoungerThan),
processorName: processor.Name(),
result: success,
}
if err != nil {
labels[result] = failure
}
curationDurations.With(labels).Observe(duration)
}(time.Now())
defer status.UpdateCurationState(&metric.CurationState{Active: false})
iterator, err := samples.NewIterator(true)
if err != nil {
return err
}
defer iterator.Close()
if !iterator.SeekToLast() {
glog.Info("Empty database; skipping curation.")
return
}
keyDto, _ := c.dtoSampleKeys.Get()
defer c.dtoSampleKeys.Give(keyDto)
lastBlock, _ := c.sampleKeys.Get()
defer c.sampleKeys.Give(lastBlock)
if err := iterator.Key(keyDto); err != nil {
panic(err)
}
lastBlock.Load(keyDto)
if !iterator.SeekToFirst() {
glog.Info("Empty database; skipping curation.")
return
}
firstBlock, _ := c.sampleKeys.Get()
defer c.sampleKeys.Give(firstBlock)
if err := iterator.Key(keyDto); err != nil {
panic(err)
}
firstBlock.Load(keyDto)
scanner := &watermarkScanner{
curationState: curationState,
ignoreYoungerThan: ignoreYoungerThan,
processor: processor,
status: status,
stop: c.stop,
stopAt: instant.Add(-1 * ignoreYoungerThan),
sampleIterator: iterator,
samples: samples,
firstBlock: firstBlock,
lastBlock: lastBlock,
ViewQueue: c.viewQueue,
dtoSampleKeys: c.dtoSampleKeys,
sampleKeys: c.sampleKeys,
}
// Right now, the ability to stop a curation is limited to the beginning of
// each fingerprint cycle. It is impractical to cease the work once it has
// begun for a given series.
_, err = watermarks.ForEach(scanner, scanner, scanner)
return
}
// Close needs to be called to cleanly dispose of a curator.
func (c *Curator) Close() {
c.dtoSampleKeys.Close()
c.sampleKeys.Close()
}
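// Illustrative only: a rough sketch of one curation cycle as driven by a
// caller (compare the compaction test elsewhere in this change). All
// variable names are placeholders.
//
//	curator := NewCurator(&CuratorOptions{Stop: stopChan, ViewQueue: viewQueue})
//	defer curator.Close()
//	err := curator.Run(time.Hour, clientmodel.Now(), processor,
//		curationRemarks, samples, highWatermarks, statusUpdater)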
func (w *watermarkScanner) DecodeKey(in interface{}) (interface{}, error) {
key := &dto.Fingerprint{}
bytes := in.([]byte)
if err := proto.Unmarshal(bytes, key); err != nil {
return nil, err
}
fingerprint := &clientmodel.Fingerprint{}
loadFingerprint(fingerprint, key)
return fingerprint, nil
}
func (w *watermarkScanner) DecodeValue(in interface{}) (interface{}, error) {
value := &dto.MetricHighWatermark{}
bytes := in.([]byte)
if err := proto.Unmarshal(bytes, value); err != nil {
return nil, err
}
watermark := &watermarks{}
watermark.load(value)
return watermark, nil
}
func (w *watermarkScanner) shouldStop() bool {
select {
case _, ok := <-w.stop:
if ok {
panic("channel should be closed only")
}
return true
default:
return false
}
}
func (w *watermarkScanner) Filter(key, value interface{}) (r storage.FilterResult) {
fingerprint := key.(*clientmodel.Fingerprint)
defer func() {
labels := prometheus.Labels{
cutOff: fmt.Sprint(w.ignoreYoungerThan),
result: strings.ToLower(r.String()),
processorName: w.processor.Name(),
}
curationFilterOperations.With(labels).Inc()
w.status.UpdateCurationState(&metric.CurationState{
Active: true,
Name: w.processor.Name(),
Limit: w.ignoreYoungerThan,
Fingerprint: fingerprint,
})
}()
if w.shouldStop() {
return storage.Stop
}
k := &curationKey{
Fingerprint: fingerprint,
ProcessorMessageRaw: w.processor.Signature(),
ProcessorMessageTypeName: w.processor.Name(),
IgnoreYoungerThan: w.ignoreYoungerThan,
}
curationRemark, present, err := w.curationState.Get(k)
if err != nil {
return
}
if !present {
return storage.Accept
}
if !curationRemark.Before(w.stopAt) {
return storage.Skip
}
watermark := value.(*watermarks)
if !curationRemark.Before(watermark.High) {
return storage.Skip
}
curationConsistent, err := w.curationConsistent(fingerprint, watermark)
if err != nil {
return
}
if curationConsistent {
return storage.Skip
}
return storage.Accept
}
// curationConsistent determines whether the given metric is already curated
// up to its high watermark and therefore needs no further curation.
func (w *watermarkScanner) curationConsistent(f *clientmodel.Fingerprint, watermark *watermarks) (bool, error) {
k := &curationKey{
Fingerprint: f,
ProcessorMessageRaw: w.processor.Signature(),
ProcessorMessageTypeName: w.processor.Name(),
IgnoreYoungerThan: w.ignoreYoungerThan,
}
curationRemark, present, err := w.curationState.Get(k)
if err != nil {
return false, err
}
if !present {
return false, nil
}
if !curationRemark.Before(watermark.High) {
return true, nil
}
return false, nil
}
func (w *watermarkScanner) Operate(key, _ interface{}) (oErr *storage.OperatorError) {
fingerprint := key.(*clientmodel.Fingerprint)
glog.Infof("Curating %s...", fingerprint)
if len(w.ViewQueue) > 0 {
glog.Warning("Deferred due to view queue.")
time.Sleep(curationYieldPeriod)
}
if fingerprint.Less(w.firstBlock.Fingerprint) {
glog.Warning("Skipped since before keyspace.")
return nil
}
if w.lastBlock.Fingerprint.Less(fingerprint) {
glog.Warning("Skipped since after keyspace.")
return nil
}
curationState, _, err := w.curationState.Get(&curationKey{
Fingerprint: fingerprint,
ProcessorMessageRaw: w.processor.Signature(),
ProcessorMessageTypeName: w.processor.Name(),
IgnoreYoungerThan: w.ignoreYoungerThan,
})
if err != nil {
glog.Warning("Unable to get curation state: %s", err)
// An anomaly with the curation remark is likely not fatal in the sense that
// there was a decoding error with the entity and shouldn't be cause to stop
// work. The process will simply start from a pessimistic work time and
// work forward. With an idempotent processor, this is safe.
return &storage.OperatorError{Error: err, Continuable: true}
}
keySet, _ := w.sampleKeys.Get()
defer w.sampleKeys.Give(keySet)
keySet.Fingerprint = fingerprint
keySet.FirstTimestamp = curationState
// Invariant: The fingerprint tests above ensure that we have the same
// fingerprint.
keySet.Constrain(w.firstBlock, w.lastBlock)
seeker := &iteratorSeekerState{
i: w.sampleIterator,
obj: keySet,
first: w.firstBlock,
last: w.lastBlock,
dtoSampleKeys: w.dtoSampleKeys,
sampleKeys: w.sampleKeys,
}
for state := seeker.initialize; state != nil; state = state() {
}
if seeker.err != nil {
glog.Warningf("Got error in state machine: %s", seeker.err)
return &storage.OperatorError{Error: seeker.err, Continuable: !seeker.iteratorInvalid}
}
if seeker.iteratorInvalid {
glog.Warningf("Got illegal iterator in state machine: %s", err)
return &storage.OperatorError{Error: errIllegalIterator, Continuable: false}
}
if !seeker.seriesOperable {
return
}
lastTime, err := w.processor.Apply(w.sampleIterator, w.samples, w.stopAt, fingerprint)
if err != nil {
// We can't divine the severity of a processor error without refactoring the
// interface.
return &storage.OperatorError{Error: err, Continuable: false}
}
if err = w.curationState.Update(&curationKey{
Fingerprint: fingerprint,
ProcessorMessageRaw: w.processor.Signature(),
ProcessorMessageTypeName: w.processor.Name(),
IgnoreYoungerThan: w.ignoreYoungerThan,
}, lastTime); err != nil {
// Under the assumption that the processors are idempotent, they can be
// re-run; thus a failure to commit the curation remark is no cause
// to cease further progress.
return &storage.OperatorError{Error: err, Continuable: true}
}
return nil
}
// curationKey provides a representation of dto.CurationKey with associated
// business logic methods attached to it to enhance code readability.
type curationKey struct {
Fingerprint *clientmodel.Fingerprint
ProcessorMessageRaw []byte
ProcessorMessageTypeName string
IgnoreYoungerThan time.Duration
}
// Equal answers whether the two curationKeys are equivalent.
func (c *curationKey) Equal(o *curationKey) bool {
switch {
case !c.Fingerprint.Equal(o.Fingerprint):
return false
case !bytes.Equal(c.ProcessorMessageRaw, o.ProcessorMessageRaw):
return false
case c.ProcessorMessageTypeName != o.ProcessorMessageTypeName:
return false
case c.IgnoreYoungerThan != o.IgnoreYoungerThan:
return false
}
return true
}
func (c *curationKey) dump(d *dto.CurationKey) {
d.Reset()
// BUG(matt): Avenue for simplification.
fingerprintDTO := &dto.Fingerprint{}
dumpFingerprint(fingerprintDTO, c.Fingerprint)
d.Fingerprint = fingerprintDTO
d.ProcessorMessageRaw = c.ProcessorMessageRaw
d.ProcessorMessageTypeName = proto.String(c.ProcessorMessageTypeName)
d.IgnoreYoungerThan = proto.Int64(int64(c.IgnoreYoungerThan))
}
func (c *curationKey) load(d *dto.CurationKey) {
// BUG(matt): Avenue for simplification.
c.Fingerprint = &clientmodel.Fingerprint{}
loadFingerprint(c.Fingerprint, d.Fingerprint)
c.ProcessorMessageRaw = d.ProcessorMessageRaw
c.ProcessorMessageTypeName = d.GetProcessorMessageTypeName()
c.IgnoreYoungerThan = time.Duration(d.GetIgnoreYoungerThan())
}


@ -1,68 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"sort"
"code.google.com/p/goprotobuf/proto"
clientmodel "github.com/prometheus/client_golang/model"
dto "github.com/prometheus/prometheus/model/generated"
)
func dumpFingerprint(d *dto.Fingerprint, f *clientmodel.Fingerprint) {
d.Reset()
d.Signature = proto.String(f.String())
}
func loadFingerprint(f *clientmodel.Fingerprint, d *dto.Fingerprint) {
f.LoadFromString(d.GetSignature())
}
func dumpMetric(d *dto.Metric, m clientmodel.Metric) {
d.Reset()
metricLength := len(m)
labelNames := make([]string, 0, metricLength)
for labelName := range m {
labelNames = append(labelNames, string(labelName))
}
sort.Strings(labelNames)
pairs := make([]*dto.LabelPair, 0, metricLength)
for _, labelName := range labelNames {
l := clientmodel.LabelName(labelName)
labelValue := m[l]
labelPair := &dto.LabelPair{
Name: proto.String(string(labelName)),
Value: proto.String(string(labelValue)),
}
pairs = append(pairs, labelPair)
}
d.LabelPair = pairs
}
func dumpLabelName(d *dto.LabelName, l clientmodel.LabelName) {
d.Reset()
d.Name = proto.String(string(l))
}


@ -1,548 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"sort"
"testing"
"time"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
)
func GetFingerprintsForLabelSetTests(p metric.Persistence, t testing.TB) {
metrics := []clientmodel.Metric{
{
clientmodel.MetricNameLabel: "test_metric",
"method": "get",
"result": "success",
},
{
clientmodel.MetricNameLabel: "test_metric",
"method": "get",
"result": "failure",
},
{
clientmodel.MetricNameLabel: "test_metric",
"method": "post",
"result": "success",
},
{
clientmodel.MetricNameLabel: "test_metric",
"method": "post",
"result": "failure",
},
}
newTestLabelMatcher := func(matchType metric.MatchType, name clientmodel.LabelName, value clientmodel.LabelValue) *metric.LabelMatcher {
m, err := metric.NewLabelMatcher(matchType, name, value)
if err != nil {
t.Fatalf("Couldn't create label matcher: %v", err)
}
return m
}
scenarios := []struct {
in metric.LabelMatchers
outIndexes []int
}{
{
in: metric.LabelMatchers{
newTestLabelMatcher(metric.Equal, clientmodel.MetricNameLabel, "test_metric"),
},
outIndexes: []int{0, 1, 2, 3},
},
{
in: metric.LabelMatchers{
newTestLabelMatcher(metric.Equal, clientmodel.MetricNameLabel, "non_existent_metric"),
},
outIndexes: []int{},
},
{
in: metric.LabelMatchers{
newTestLabelMatcher(metric.Equal, clientmodel.MetricNameLabel, "non_existent_metric"),
newTestLabelMatcher(metric.Equal, "result", "success"),
},
outIndexes: []int{},
},
{
in: metric.LabelMatchers{
newTestLabelMatcher(metric.Equal, clientmodel.MetricNameLabel, "test_metric"),
newTestLabelMatcher(metric.Equal, "result", "success"),
},
outIndexes: []int{0, 2},
},
{
in: metric.LabelMatchers{
newTestLabelMatcher(metric.Equal, clientmodel.MetricNameLabel, "test_metric"),
newTestLabelMatcher(metric.NotEqual, "result", "success"),
},
outIndexes: []int{1, 3},
},
{
in: metric.LabelMatchers{
newTestLabelMatcher(metric.Equal, clientmodel.MetricNameLabel, "test_metric"),
newTestLabelMatcher(metric.RegexMatch, "result", "foo|success|bar"),
},
outIndexes: []int{0, 2},
},
{
in: metric.LabelMatchers{
newTestLabelMatcher(metric.Equal, clientmodel.MetricNameLabel, "test_metric"),
newTestLabelMatcher(metric.RegexNoMatch, "result", "foo|success|bar"),
},
outIndexes: []int{1, 3},
},
{
in: metric.LabelMatchers{
newTestLabelMatcher(metric.Equal, clientmodel.MetricNameLabel, "test_metric"),
newTestLabelMatcher(metric.RegexNoMatch, "result", "foo|success|bar"),
newTestLabelMatcher(metric.RegexMatch, "method", "os"),
},
outIndexes: []int{3},
},
}
for _, m := range metrics {
testAppendSamples(p, &clientmodel.Sample{
Value: 0,
Timestamp: 0,
Metric: m,
}, t)
}
for i, s := range scenarios {
actualFps, err := p.GetFingerprintsForLabelMatchers(s.in)
if err != nil {
t.Fatalf("%d. Couldn't get fingerprints for label matchers: %v", i, err)
}
expectedFps := clientmodel.Fingerprints{}
for _, i := range s.outIndexes {
fp := &clientmodel.Fingerprint{}
fp.LoadFromMetric(metrics[i])
expectedFps = append(expectedFps, fp)
}
sort.Sort(actualFps)
sort.Sort(expectedFps)
if len(actualFps) != len(expectedFps) {
t.Fatalf("%d. Got %d fingerprints; want %d", i, len(actualFps), len(expectedFps))
}
for j, actualFp := range actualFps {
if !actualFp.Equal(expectedFps[j]) {
t.Fatalf("%d.%d. Got fingerprint %v; want %v", i, j, actualFp, expectedFps[j])
}
}
}
}
func GetLabelValuesForLabelNameTests(p metric.Persistence, t testing.TB) {
testAppendSamples(p, &clientmodel.Sample{
Value: 0,
Timestamp: 0,
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "my_metric",
"request_type": "create",
"result": "success",
},
}, t)
testAppendSamples(p, &clientmodel.Sample{
Value: 0,
Timestamp: 0,
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "my_metric",
"request_type": "delete",
"outcome": "failure",
},
}, t)
expectedIndex := map[clientmodel.LabelName]clientmodel.LabelValues{
clientmodel.MetricNameLabel: {"my_metric"},
"request_type": {"create", "delete"},
"result": {"success"},
"outcome": {"failure"},
}
for name, expected := range expectedIndex {
actual, err := p.GetLabelValuesForLabelName(name)
if err != nil {
t.Fatalf("Error getting values for label %s: %v", name, err)
}
if len(actual) != len(expected) {
t.Fatalf("Number of values don't match for label %s: got %d; want %d", name, len(actual), len(expected))
}
for i := range expected {
inActual := false
for _, a := range actual {
if expected[i] == a {
inActual = true
break
}
}
if !inActual {
t.Fatalf("%d. Expected label value %s not in output", i, expected[i])
}
}
}
}
func GetMetricForFingerprintTests(p metric.Persistence, t testing.TB) {
testAppendSamples(p, &clientmodel.Sample{
Value: 0,
Timestamp: 0,
Metric: clientmodel.Metric{
"request_type": "your_mom",
},
}, t)
testAppendSamples(p, &clientmodel.Sample{
Value: 0,
Timestamp: 0,
Metric: clientmodel.Metric{
"request_type": "your_dad",
"one-off": "value",
},
}, t)
result, err := p.GetFingerprintsForLabelMatchers(metric.LabelMatchers{{
Type: metric.Equal,
Name: "request_type",
Value: "your_mom",
}})
if err != nil {
t.Error(err)
}
if len(result) != 1 {
t.Errorf("Expected one element.")
}
m, err := p.GetMetricForFingerprint(result[0])
if err != nil {
t.Error(err)
}
if m == nil {
t.Fatal("Did not expect nil.")
}
if len(m) != 1 {
t.Errorf("Expected one-dimensional metric.")
}
if m["request_type"] != "your_mom" {
t.Errorf("Expected metric to match.")
}
result, err = p.GetFingerprintsForLabelMatchers(metric.LabelMatchers{{
Type: metric.Equal,
Name: "request_type",
Value: "your_dad",
}})
if err != nil {
t.Error(err)
}
if len(result) != 1 {
t.Errorf("Expected one element.")
}
m, err = p.GetMetricForFingerprint(result[0])
if m == nil {
t.Fatal("Did not expect nil.")
}
if err != nil {
t.Error(err)
}
if len(m) != 2 {
t.Errorf("Expected two-dimensional metric.")
}
if m["request_type"] != "your_dad" {
t.Errorf("Expected metric to match.")
}
if m["one-off"] != "value" {
t.Errorf("Expected metric to match.")
}
// Verify that mutating a returned metric does not result in the mutated
// metric to be returned at the next GetMetricForFingerprint() call.
m["one-off"] = "new value"
m, err = p.GetMetricForFingerprint(result[0])
if m == nil {
t.Fatal("Did not expect nil.")
}
if err != nil {
t.Error(err)
}
if len(m) != 2 {
t.Errorf("Expected two-dimensional metric.")
}
if m["request_type"] != "your_dad" {
t.Errorf("Expected metric to match.")
}
if m["one-off"] != "value" {
t.Errorf("Expected metric to match.")
}
}
func AppendRepeatingValuesTests(p metric.Persistence, t testing.TB) {
m := clientmodel.Metric{
clientmodel.MetricNameLabel: "errors_total",
"controller": "foo",
"operation": "bar",
}
increments := 10
repetitions := 500
for i := 0; i < increments; i++ {
for j := 0; j < repetitions; j++ {
time := clientmodel.Timestamp(0).Add(time.Duration(i) * time.Hour).Add(time.Duration(j) * time.Second)
testAppendSamples(p, &clientmodel.Sample{
Value: clientmodel.SampleValue(i),
Timestamp: time,
Metric: m,
}, t)
}
}
v, ok := p.(metric.View)
if !ok {
// It's purely a benchmark for a Persistence that is not viewable.
return
}
matchers := labelMatchersFromLabelSet(clientmodel.LabelSet{
clientmodel.MetricNameLabel: "errors_total",
"controller": "foo",
"operation": "bar",
})
for i := 0; i < increments; i++ {
for j := 0; j < repetitions; j++ {
fingerprints, err := p.GetFingerprintsForLabelMatchers(matchers)
if err != nil {
t.Fatal(err)
}
if len(fingerprints) != 1 {
t.Fatalf("expected %d fingerprints, got %d", 1, len(fingerprints))
}
time := clientmodel.Timestamp(0).Add(time.Duration(i) * time.Hour).Add(time.Duration(j) * time.Second)
samples := v.GetValueAtTime(fingerprints[0], time)
if len(samples) == 0 {
t.Fatal("expected at least one sample.")
}
expected := clientmodel.SampleValue(i)
for _, sample := range samples {
if sample.Value != expected {
t.Fatalf("expected %v value, got %v", expected, sample.Value)
}
}
}
}
}
func AppendsRepeatingValuesTests(p metric.Persistence, t testing.TB) {
m := clientmodel.Metric{
clientmodel.MetricNameLabel: "errors_total",
"controller": "foo",
"operation": "bar",
}
increments := 10
repetitions := 500
s := clientmodel.Samples{}
for i := 0; i < increments; i++ {
for j := 0; j < repetitions; j++ {
time := clientmodel.Timestamp(0).Add(time.Duration(i) * time.Hour).Add(time.Duration(j) * time.Second)
s = append(s, &clientmodel.Sample{
Value: clientmodel.SampleValue(i),
Timestamp: time,
Metric: m,
})
}
}
p.AppendSamples(s)
v, ok := p.(metric.View)
if !ok {
// It's purely a benchmark for a Persistence that is not viewable.
return
}
matchers := labelMatchersFromLabelSet(clientmodel.LabelSet{
clientmodel.MetricNameLabel: "errors_total",
"controller": "foo",
"operation": "bar",
})
for i := 0; i < increments; i++ {
for j := 0; j < repetitions; j++ {
fingerprints, err := p.GetFingerprintsForLabelMatchers(matchers)
if err != nil {
t.Fatal(err)
}
if len(fingerprints) != 1 {
t.Fatalf("expected %d fingerprints, got %d", 1, len(fingerprints))
}
time := clientmodel.Timestamp(0).Add(time.Duration(i) * time.Hour).Add(time.Duration(j) * time.Second)
samples := v.GetValueAtTime(fingerprints[0], time)
if len(samples) == 0 {
t.Fatal("expected at least one sample.")
}
expected := clientmodel.SampleValue(i)
for _, sample := range samples {
if sample.Value != expected {
t.Fatalf("expected %v value, got %v", expected, sample.Value)
}
}
}
}
}
// Test Definitions Below
var testLevelDBGetFingerprintsForLabelSet = buildLevelDBTestPersistence("get_fingerprints_for_labelset", GetFingerprintsForLabelSetTests)
func TestLevelDBGetFingerprintsForLabelSet(t *testing.T) {
testLevelDBGetFingerprintsForLabelSet(t)
}
func BenchmarkLevelDBGetFingerprintsForLabelSet(b *testing.B) {
for i := 0; i < b.N; i++ {
testLevelDBGetFingerprintsForLabelSet(b)
}
}
var testLevelDBGetLabelValuesForLabelName = buildLevelDBTestPersistence("get_label_values_for_labelname", GetLabelValuesForLabelNameTests)
func TestLevelDBGetFingerprintsForLabelName(t *testing.T) {
testLevelDBGetLabelValuesForLabelName(t)
}
func BenchmarkLevelDBGetLabelValuesForLabelName(b *testing.B) {
for i := 0; i < b.N; i++ {
testLevelDBGetLabelValuesForLabelName(b)
}
}
var testLevelDBGetMetricForFingerprint = buildLevelDBTestPersistence("get_metric_for_fingerprint", GetMetricForFingerprintTests)
func TestLevelDBGetMetricForFingerprint(t *testing.T) {
testLevelDBGetMetricForFingerprint(t)
}
func BenchmarkLevelDBGetMetricForFingerprint(b *testing.B) {
for i := 0; i < b.N; i++ {
testLevelDBGetMetricForFingerprint(b)
}
}
var testLevelDBAppendRepeatingValues = buildLevelDBTestPersistence("append_repeating_values", AppendRepeatingValuesTests)
func TestLevelDBAppendRepeatingValues(t *testing.T) {
testLevelDBAppendRepeatingValues(t)
}
func BenchmarkLevelDBAppendRepeatingValues(b *testing.B) {
for i := 0; i < b.N; i++ {
testLevelDBAppendRepeatingValues(b)
}
}
var testLevelDBAppendsRepeatingValues = buildLevelDBTestPersistence("appends_repeating_values", AppendsRepeatingValuesTests)
func TestLevelDBAppendsRepeatingValues(t *testing.T) {
testLevelDBAppendsRepeatingValues(t)
}
func BenchmarkLevelDBAppendsRepeatingValues(b *testing.B) {
for i := 0; i < b.N; i++ {
testLevelDBAppendsRepeatingValues(b)
}
}
var testMemoryGetFingerprintsForLabelSet = buildMemoryTestPersistence(GetFingerprintsForLabelSetTests)
func TestMemoryGetFingerprintsForLabelSet(t *testing.T) {
testMemoryGetFingerprintsForLabelSet(t)
}
func BenchmarkMemoryGetFingerprintsForLabelSet(b *testing.B) {
for i := 0; i < b.N; i++ {
testMemoryGetFingerprintsForLabelSet(b)
}
}
var testMemoryGetLabelValuesForLabelName = buildMemoryTestPersistence(GetLabelValuesForLabelNameTests)
func TestMemoryGetLabelValuesForLabelName(t *testing.T) {
testMemoryGetLabelValuesForLabelName(t)
}
func BenchmarkMemoryGetLabelValuesForLabelName(b *testing.B) {
for i := 0; i < b.N; i++ {
testMemoryGetLabelValuesForLabelName(b)
}
}
var testMemoryGetMetricForFingerprint = buildMemoryTestPersistence(GetMetricForFingerprintTests)
func TestMemoryGetMetricForFingerprint(t *testing.T) {
testMemoryGetMetricForFingerprint(t)
}
func BenchmarkMemoryGetMetricForFingerprint(b *testing.B) {
for i := 0; i < b.N; i++ {
testMemoryGetMetricForFingerprint(b)
}
}
var testMemoryAppendRepeatingValues = buildMemoryTestPersistence(AppendRepeatingValuesTests)
func TestMemoryAppendRepeatingValues(t *testing.T) {
testMemoryAppendRepeatingValues(t)
}
func BenchmarkMemoryAppendRepeatingValues(b *testing.B) {
for i := 0; i < b.N; i++ {
testMemoryAppendRepeatingValues(b)
}
}


@ -1,212 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"time"
"github.com/prometheus/prometheus/utility"
clientmodel "github.com/prometheus/client_golang/model"
dto "github.com/prometheus/prometheus/model/generated"
)
type dtoSampleKeyList struct {
l utility.FreeList
}
func newDtoSampleKeyList(cap int) *dtoSampleKeyList {
return &dtoSampleKeyList{
l: utility.NewFreeList(cap),
}
}
func (l *dtoSampleKeyList) Get() (*dto.SampleKey, bool) {
if v, ok := l.l.Get(); ok {
return v.(*dto.SampleKey), ok
}
return &dto.SampleKey{}, false
}
func (l *dtoSampleKeyList) Give(v *dto.SampleKey) bool {
v.Reset()
return l.l.Give(v)
}
func (l *dtoSampleKeyList) Close() {
l.l.Close()
}
type sampleKeyList struct {
l utility.FreeList
}
var defaultSampleKey = &SampleKey{}
func newSampleKeyList(cap int) *sampleKeyList {
return &sampleKeyList{
l: utility.NewFreeList(cap),
}
}
func (l *sampleKeyList) Get() (*SampleKey, bool) {
if v, ok := l.l.Get(); ok {
return v.(*SampleKey), ok
}
return &SampleKey{}, false
}
func (l *sampleKeyList) Give(v *SampleKey) bool {
*v = *defaultSampleKey
return l.l.Give(v)
}
func (l *sampleKeyList) Close() {
l.l.Close()
}
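// Illustrative only: the Get/Give pooling pattern shared by every free list
// in this file. Objects are reset in Give, so a value obtained from Get is
// always zeroed, whether it came from the pool or a fresh allocation.
//
//	keys := newSampleKeyList(10)
//	defer keys.Close()
//	k, _ := keys.Get()
//	// ... use k ...
//	keys.Give(k) // reset and returned to the pool for reuse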
type valueAtTimeList struct {
l utility.FreeList
}
func (l *valueAtTimeList) Get(fp *clientmodel.Fingerprint, time clientmodel.Timestamp) *getValuesAtTimeOp {
var op *getValuesAtTimeOp
v, ok := l.l.Get()
if ok {
op = v.(*getValuesAtTimeOp)
} else {
op = &getValuesAtTimeOp{}
}
op.fp = *fp
op.current = time
return op
}
var pGetValuesAtTimeOp = &getValuesAtTimeOp{}
func (l *valueAtTimeList) Give(v *getValuesAtTimeOp) bool {
*v = *pGetValuesAtTimeOp
return l.l.Give(v)
}
func newValueAtTimeList(cap int) *valueAtTimeList {
return &valueAtTimeList{
l: utility.NewFreeList(cap),
}
}
type valueAtIntervalList struct {
l utility.FreeList
}
func (l *valueAtIntervalList) Get(fp *clientmodel.Fingerprint, from, through clientmodel.Timestamp, interval time.Duration) *getValuesAtIntervalOp {
var op *getValuesAtIntervalOp
v, ok := l.l.Get()
if ok {
op = v.(*getValuesAtIntervalOp)
} else {
op = &getValuesAtIntervalOp{}
}
op.fp = *fp
op.current = from
op.through = through
op.interval = interval
return op
}
var pGetValuesAtIntervalOp = &getValuesAtIntervalOp{}
func (l *valueAtIntervalList) Give(v *getValuesAtIntervalOp) bool {
*v = *pGetValuesAtIntervalOp
return l.l.Give(v)
}
func newValueAtIntervalList(cap int) *valueAtIntervalList {
return &valueAtIntervalList{
l: utility.NewFreeList(cap),
}
}
type valueAlongRangeList struct {
l utility.FreeList
}
func (l *valueAlongRangeList) Get(fp *clientmodel.Fingerprint, from, through clientmodel.Timestamp) *getValuesAlongRangeOp {
var op *getValuesAlongRangeOp
v, ok := l.l.Get()
if ok {
op = v.(*getValuesAlongRangeOp)
} else {
op = &getValuesAlongRangeOp{}
}
op.fp = *fp
op.current = from
op.through = through
return op
}
var pGetValuesAlongRangeOp = &getValuesAlongRangeOp{}
func (l *valueAlongRangeList) Give(v *getValuesAlongRangeOp) bool {
*v = *pGetValuesAlongRangeOp
return l.l.Give(v)
}
func newValueAlongRangeList(cap int) *valueAlongRangeList {
return &valueAlongRangeList{
l: utility.NewFreeList(cap),
}
}
type valueAtIntervalAlongRangeList struct {
l utility.FreeList
}
func (l *valueAtIntervalAlongRangeList) Get(fp *clientmodel.Fingerprint, from, through clientmodel.Timestamp, interval, rangeDuration time.Duration) *getValueRangeAtIntervalOp {
var op *getValueRangeAtIntervalOp
v, ok := l.l.Get()
if ok {
op = v.(*getValueRangeAtIntervalOp)
} else {
op = &getValueRangeAtIntervalOp{}
}
op.fp = *fp
op.current = from
op.rangeThrough = from.Add(rangeDuration)
op.rangeDuration = rangeDuration
op.interval = interval
op.through = through
return op
}
var pGetValueRangeAtIntervalOp = &getValueRangeAtIntervalOp{}
func (l *valueAtIntervalAlongRangeList) Give(v *getValueRangeAtIntervalOp) bool {
*v = *pGetValueRangeAtIntervalOp
return l.l.Give(v)
}
func newValueAtIntervalAlongRangeList(cap int) *valueAtIntervalAlongRangeList {
return &valueAtIntervalAlongRangeList{
l: utility.NewFreeList(cap),
}
}


@ -1,37 +0,0 @@
// Copyright 2014 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"testing"
"github.com/prometheus/client_golang/model"
)
// TestValueAtTimeListGet tests if the timestamp is set properly in the op
// retrieved from the free list and if the 'consumed' member is zeroed properly.
func TestValueAtTimeListGet(t *testing.T) {
l := newValueAtTimeList(1)
op := l.Get(&model.Fingerprint{}, 42)
op.consumed = true
l.Give(op)
op2 := l.Get(&model.Fingerprint{}, 4711)
if op2.Consumed() {
t.Error("Op retrieved from freelist is already consumed.")
}
if got, expected := op2.CurrentTime(), model.Timestamp(4711); got != expected {
t.Errorf("op2.CurrentTime() = %d; want %d.", got, expected)
}
}


@ -1,689 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"io"
"sort"
"sync"
"code.google.com/p/goprotobuf/proto"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
"github.com/prometheus/prometheus/storage/raw"
"github.com/prometheus/prometheus/storage/raw/leveldb"
"github.com/prometheus/prometheus/utility"
dto "github.com/prometheus/prometheus/model/generated"
)
// FingerprintMetricMapping is an in-memory map of Fingerprints to Metrics.
type FingerprintMetricMapping map[clientmodel.Fingerprint]clientmodel.Metric
// FingerprintMetricIndex models a database mapping Fingerprints to Metrics.
type FingerprintMetricIndex interface {
raw.Database
raw.Pruner
IndexBatch(FingerprintMetricMapping) error
Lookup(*clientmodel.Fingerprint) (m clientmodel.Metric, ok bool, err error)
}
// LevelDBFingerprintMetricIndex implements FingerprintMetricIndex using
// leveldb.
type LevelDBFingerprintMetricIndex struct {
*leveldb.LevelDBPersistence
}
// IndexBatch implements FingerprintMetricIndex.
func (i *LevelDBFingerprintMetricIndex) IndexBatch(mapping FingerprintMetricMapping) error {
b := leveldb.NewBatch()
defer b.Close()
for f, m := range mapping {
k := &dto.Fingerprint{}
dumpFingerprint(k, &f)
v := &dto.Metric{}
dumpMetric(v, m)
b.Put(k, v)
}
return i.LevelDBPersistence.Commit(b)
}
// Lookup implements FingerprintMetricIndex.
func (i *LevelDBFingerprintMetricIndex) Lookup(f *clientmodel.Fingerprint) (m clientmodel.Metric, ok bool, err error) {
k := &dto.Fingerprint{}
dumpFingerprint(k, f)
v := &dto.Metric{}
ok, err = i.LevelDBPersistence.Get(k, v)
if err != nil {
return nil, false, err
}
if !ok {
return nil, false, nil
}
m = clientmodel.Metric{}
for _, pair := range v.LabelPair {
m[clientmodel.LabelName(pair.GetName())] = clientmodel.LabelValue(pair.GetValue())
}
return m, true, nil
}
// NewLevelDBFingerprintMetricIndex returns a LevelDBFingerprintMetricIndex
// object ready to use.
func NewLevelDBFingerprintMetricIndex(o leveldb.LevelDBOptions) (*LevelDBFingerprintMetricIndex, error) {
s, err := leveldb.NewLevelDBPersistence(o)
if err != nil {
return nil, err
}
return &LevelDBFingerprintMetricIndex{
LevelDBPersistence: s,
}, nil
}
// LabelNameLabelValuesMapping is an in-memory map of LabelNames to
// LabelValues.
type LabelNameLabelValuesMapping map[clientmodel.LabelName]clientmodel.LabelValues
// LabelNameLabelValuesIndex models a database mapping LabelNames to
// LabelValues.
type LabelNameLabelValuesIndex interface {
raw.Database
raw.Pruner
IndexBatch(LabelNameLabelValuesMapping) error
Lookup(clientmodel.LabelName) (values clientmodel.LabelValues, ok bool, err error)
Has(clientmodel.LabelName) (ok bool, err error)
}
// LevelDBLabelNameLabelValuesIndex implements LabelNameLabelValuesIndex using
// leveldb.
type LevelDBLabelNameLabelValuesIndex struct {
*leveldb.LevelDBPersistence
}
// IndexBatch implements LabelNameLabelValuesIndex.
func (i *LevelDBLabelNameLabelValuesIndex) IndexBatch(b LabelNameLabelValuesMapping) error {
batch := leveldb.NewBatch()
defer batch.Close()
for labelName, labelValues := range b {
sort.Sort(labelValues)
key := &dto.LabelName{
Name: proto.String(string(labelName)),
}
value := &dto.LabelValueCollection{}
value.Member = make([]string, 0, len(labelValues))
for _, labelValue := range labelValues {
value.Member = append(value.Member, string(labelValue))
}
batch.Put(key, value)
}
return i.LevelDBPersistence.Commit(batch)
}
// Lookup implements LabelNameLabelValuesIndex.
func (i *LevelDBLabelNameLabelValuesIndex) Lookup(l clientmodel.LabelName) (values clientmodel.LabelValues, ok bool, err error) {
k := &dto.LabelName{}
dumpLabelName(k, l)
v := &dto.LabelValueCollection{}
ok, err = i.LevelDBPersistence.Get(k, v)
if err != nil {
return nil, false, err
}
if !ok {
return nil, false, nil
}
for _, m := range v.Member {
values = append(values, clientmodel.LabelValue(m))
}
return values, true, nil
}
// Has implements LabelNameLabelValuesIndex.
func (i *LevelDBLabelNameLabelValuesIndex) Has(l clientmodel.LabelName) (ok bool, err error) {
return i.LevelDBPersistence.Has(&dto.LabelName{
Name: proto.String(string(l)),
})
}
// NewLevelDBLabelNameLabelValuesIndex returns a LevelDBLabelNameLabelValuesIndex
// ready to use.
func NewLevelDBLabelNameLabelValuesIndex(o leveldb.LevelDBOptions) (*LevelDBLabelNameLabelValuesIndex, error) {
s, err := leveldb.NewLevelDBPersistence(o)
if err != nil {
return nil, err
}
return &LevelDBLabelNameLabelValuesIndex{
LevelDBPersistence: s,
}, nil
}
// LabelPairFingerprintMapping is an in-memory map of LabelPairs to
// Fingerprints.
type LabelPairFingerprintMapping map[metric.LabelPair]clientmodel.Fingerprints
// LabelPairFingerprintIndex models a database mapping LabelPairs to
// Fingerprints.
type LabelPairFingerprintIndex interface {
raw.Database
raw.ForEacher
raw.Pruner
IndexBatch(LabelPairFingerprintMapping) error
Lookup(*metric.LabelPair) (m clientmodel.Fingerprints, ok bool, err error)
Has(*metric.LabelPair) (ok bool, err error)
}
// LevelDBLabelPairFingerprintIndex implements LabelPairFingerprintIndex using
// leveldb.
type LevelDBLabelPairFingerprintIndex struct {
*leveldb.LevelDBPersistence
}
// IndexBatch implements LabelPairFingerprintIndex.
func (i *LevelDBLabelPairFingerprintIndex) IndexBatch(m LabelPairFingerprintMapping) error {
batch := leveldb.NewBatch()
defer batch.Close()
for pair, fps := range m {
sort.Sort(fps)
key := &dto.LabelPair{
Name: proto.String(string(pair.Name)),
Value: proto.String(string(pair.Value)),
}
value := &dto.FingerprintCollection{}
for _, fp := range fps {
f := &dto.Fingerprint{}
dumpFingerprint(f, fp)
value.Member = append(value.Member, f)
}
batch.Put(key, value)
}
return i.LevelDBPersistence.Commit(batch)
}
// Lookup implements LabelPairFingerprintIndex.
func (i *LevelDBLabelPairFingerprintIndex) Lookup(p *metric.LabelPair) (m clientmodel.Fingerprints, ok bool, err error) {
k := &dto.LabelPair{
Name: proto.String(string(p.Name)),
Value: proto.String(string(p.Value)),
}
v := &dto.FingerprintCollection{}
ok, err = i.LevelDBPersistence.Get(k, v)
if err != nil {
return nil, false, err
}
if !ok {
return nil, false, nil
}
for _, pair := range v.Member {
fp := &clientmodel.Fingerprint{}
loadFingerprint(fp, pair)
m = append(m, fp)
}
return m, true, nil
}
// Has implements LabelPairFingerprintIndex.
func (i *LevelDBLabelPairFingerprintIndex) Has(p *metric.LabelPair) (ok bool, err error) {
k := &dto.LabelPair{
Name: proto.String(string(p.Name)),
Value: proto.String(string(p.Value)),
}
return i.LevelDBPersistence.Has(k)
}
// NewLevelDBLabelSetFingerprintIndex returns a LevelDBLabelPairFingerprintIndex
// object ready to use.
func NewLevelDBLabelSetFingerprintIndex(o leveldb.LevelDBOptions) (*LevelDBLabelPairFingerprintIndex, error) {
s, err := leveldb.NewLevelDBPersistence(o)
if err != nil {
return nil, err
}
return &LevelDBLabelPairFingerprintIndex{
LevelDBPersistence: s,
}, nil
}
// MetricMembershipIndex models a database tracking the existence of Metrics.
type MetricMembershipIndex interface {
raw.Database
raw.Pruner
IndexBatch(FingerprintMetricMapping) error
Has(clientmodel.Metric) (ok bool, err error)
}
// LevelDBMetricMembershipIndex implements MetricMembershipIndex using leveldb.
type LevelDBMetricMembershipIndex struct {
*leveldb.LevelDBPersistence
}
var existenceIdentity = &dto.MembershipIndexValue{}
// IndexBatch implements MetricMembershipIndex.
func (i *LevelDBMetricMembershipIndex) IndexBatch(b FingerprintMetricMapping) error {
batch := leveldb.NewBatch()
defer batch.Close()
for _, m := range b {
k := &dto.Metric{}
dumpMetric(k, m)
batch.Put(k, existenceIdentity)
}
return i.LevelDBPersistence.Commit(batch)
}
// Has implements MetricMembershipIndex.
func (i *LevelDBMetricMembershipIndex) Has(m clientmodel.Metric) (ok bool, err error) {
k := &dto.Metric{}
dumpMetric(k, m)
return i.LevelDBPersistence.Has(k)
}
// NewLevelDBMetricMembershipIndex returns a LevelDBMetricMembershipIndex object
// ready to use.
func NewLevelDBMetricMembershipIndex(o leveldb.LevelDBOptions) (*LevelDBMetricMembershipIndex, error) {
s, err := leveldb.NewLevelDBPersistence(o)
if err != nil {
return nil, err
}
return &LevelDBMetricMembershipIndex{
LevelDBPersistence: s,
}, nil
}
// MetricIndexer indexes facets of a clientmodel.Metric.
type MetricIndexer interface {
// IndexMetrics makes no assumptions about the concurrency safety of the
// underlying implementer.
IndexMetrics(FingerprintMetricMapping) error
}
// IndexerObserver is notified of each FingerprintMetricMapping that has been
// successfully indexed.
type IndexerObserver interface {
Observe(FingerprintMetricMapping) error
}
// IndexerProxy receives IndexMetrics requests and proxies them to the
// underlying MetricIndexer. Upon success of the underlying indexer, the
// registered IndexerObservers are called serially.
//
// If an error occurs in the underlying MetricIndexer or any of the observers,
// this proxy stops working and returns the offending error on this call and
// all subsequent ones.
type IndexerProxy struct {
err error
i MetricIndexer
observers []IndexerObserver
}
// IndexMetrics proxies the given FingerprintMetricMapping to the underlying
// MetricIndexer and calls all registered observers with it.
func (p *IndexerProxy) IndexMetrics(b FingerprintMetricMapping) error {
if p.err != nil {
return p.err
}
if p.err = p.i.IndexMetrics(b); p.err != nil {
return p.err
}
for _, o := range p.observers {
if p.err = o.Observe(b); p.err != nil {
return p.err
}
}
return nil
}
// Close closes the underlying indexer.
func (p *IndexerProxy) Close() error {
if p.err != nil {
return p.err
}
if closer, ok := p.i.(io.Closer); ok {
p.err = closer.Close()
return p.err
}
return nil
}
// Flush flushes the underlying indexer's pending index requests.
func (p *IndexerProxy) Flush() error {
if p.err != nil {
return p.err
}
if flusher, ok := p.i.(flusher); ok {
p.err = flusher.Flush()
return p.err
}
return nil
}
// NewIndexerProxy builds an IndexerProxy for the given configuration.
func NewIndexerProxy(i MetricIndexer, o ...IndexerObserver) *IndexerProxy {
return &IndexerProxy{
i: i,
observers: o,
}
}
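
A minimal wiring sketch for the proxy; countingObserver and indexWithAudit are hypothetical, and idx stands for any MetricIndexer. Once a call fails, the proxy latches the error and returns it from every later call:

// countingObserver is a hypothetical IndexerObserver that tallies how many
// metrics have been indexed.
type countingObserver struct{ n int }

func (c *countingObserver) Observe(b FingerprintMetricMapping) error {
    c.n += len(b)
    return nil
}

func indexWithAudit(idx MetricIndexer, b FingerprintMetricMapping) error {
    proxy := NewIndexerProxy(idx, &countingObserver{})
    // Forwards to idx first; on success, the observer is notified serially.
    return proxy.IndexMetrics(b)
}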
// SynchronizedIndexer provides naive locking for any MetricIndexer.
type SynchronizedIndexer struct {
mu sync.Mutex
i MetricIndexer
}
// IndexMetrics calls IndexMetrics of the wrapped MetricIndexer after acquiring
// a lock.
func (i *SynchronizedIndexer) IndexMetrics(b FingerprintMetricMapping) error {
i.mu.Lock()
defer i.mu.Unlock()
return i.i.IndexMetrics(b)
}
type flusher interface {
Flush() error
}
// Flush calls Flush of the wrapped MetricIndexer after acquiring a lock. If the
// wrapped MetricIndexer has no Flush method, this is a no-op.
func (i *SynchronizedIndexer) Flush() error {
if flusher, ok := i.i.(flusher); ok {
i.mu.Lock()
defer i.mu.Unlock()
return flusher.Flush()
}
return nil
}
// Close calls Close of the wrapped MetricIndexer after acquiring a lock. If the
// wrapped MetricIndexer has no Close method, this is a no-op.
func (i *SynchronizedIndexer) Close() error {
if closer, ok := i.i.(io.Closer); ok {
i.mu.Lock()
defer i.mu.Unlock()
return closer.Close()
}
return nil
}
// NewSynchronizedIndexer returns a SynchronizedIndexer wrapping the given
// MetricIndexer.
func NewSynchronizedIndexer(i MetricIndexer) *SynchronizedIndexer {
return &SynchronizedIndexer{
i: i,
}
}
// BufferedIndexer provides unsynchronized index buffering.
//
// If an error occurs in the underlying MetricIndexer, this buffer stops
// working and returns the offending error on this call and all subsequent ones.
type BufferedIndexer struct {
i MetricIndexer
limit int
buf []FingerprintMetricMapping
err error
}
// IndexMetrics buffers the entries of the given FingerprintMetricMapping,
// flushing them to the underlying indexer once the buffer limit is reached.
func (i *BufferedIndexer) IndexMetrics(b FingerprintMetricMapping) error {
if i.err != nil {
return i.err
}
i.buf = append(i.buf, b)
if len(i.buf) < i.limit {
return nil
}
i.err = i.Flush()
return i.err
}
// Flush writes all pending entries to the index.
func (i *BufferedIndexer) Flush() error {
if i.err != nil {
return i.err
}
if len(i.buf) == 0 {
return nil
}
superset := FingerprintMetricMapping{}
for _, b := range i.buf {
for fp, m := range b {
if _, ok := superset[fp]; ok {
continue
}
superset[fp] = m
}
}
i.buf = make([]FingerprintMetricMapping, 0, i.limit)
i.err = i.i.IndexMetrics(superset)
return i.err
}
// Close flushes the buffer and closes the underlying indexer.
func (i *BufferedIndexer) Close() error {
if err := i.Flush(); err != nil {
return err
}
if closer, ok := i.i.(io.Closer); ok {
return closer.Close()
}
return nil
}
// NewBufferedIndexer returns a BufferedIndexer ready to use.
func NewBufferedIndexer(i MetricIndexer, limit int) *BufferedIndexer {
return &BufferedIndexer{
i: i,
limit: limit,
buf: make([]FingerprintMetricMapping, 0, limit),
}
}
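
A brief usage sketch: batches accumulate until the configured limit, are deduplicated by fingerprint in Flush, and are then written to the underlying indexer in a single IndexMetrics call. indexBuffered is hypothetical and the limit of 16 is arbitrary:

func indexBuffered(idx MetricIndexer, batches []FingerprintMetricMapping) error {
    buf := NewBufferedIndexer(idx, 16)
    for _, b := range batches {
        if err := buf.IndexMetrics(b); err != nil {
            return err
        }
    }
    // Flush writes whatever is still pending; Close would flush, too.
    return buf.Flush()
}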
// TotalIndexer is a MetricIndexer that indexes all standard facets of a metric
// that a user or the Prometheus subsystem would want to query against:
//
// "<Label Name>" -> {Fingerprint, ...}
// "<Label Name> <Label Value>" -> {Fingerprint, ...}
//
// "<Fingerprint>" -> Metric
//
// "<Metric>" -> Existence Value
//
// This type supports concrete queries but only single writes, and it has no
// locking semantics to enforce this.
type TotalIndexer struct {
FingerprintToMetric FingerprintMetricIndex
LabelNameToLabelValues LabelNameLabelValuesIndex
LabelPairToFingerprint LabelPairFingerprintIndex
MetricMembership MetricMembershipIndex
}
func findUnindexed(i MetricMembershipIndex, b FingerprintMetricMapping) (FingerprintMetricMapping, error) {
out := FingerprintMetricMapping{}
for fp, m := range b {
has, err := i.Has(m)
if err != nil {
return nil, err
}
if !has {
out[fp] = m
}
}
return out, nil
}
func extendLabelNameToLabelValuesIndex(i LabelNameLabelValuesIndex, b FingerprintMetricMapping) (LabelNameLabelValuesMapping, error) {
collection := map[clientmodel.LabelName]utility.Set{}
for _, m := range b {
for l, v := range m {
set, ok := collection[l]
if !ok {
baseValues, _, err := i.Lookup(l)
if err != nil {
return nil, err
}
set = utility.Set{}
for _, baseValue := range baseValues {
set.Add(baseValue)
}
collection[l] = set
}
set.Add(v)
}
}
batch := LabelNameLabelValuesMapping{}
for l, set := range collection {
values := make(clientmodel.LabelValues, 0, len(set))
for e := range set {
val := e.(clientmodel.LabelValue)
values = append(values, val)
}
batch[l] = values
}
return batch, nil
}
func extendLabelPairIndex(i LabelPairFingerprintIndex, b FingerprintMetricMapping) (LabelPairFingerprintMapping, error) {
collection := map[metric.LabelPair]utility.Set{}
for fp, m := range b {
for n, v := range m {
pair := metric.LabelPair{
Name: n,
Value: v,
}
set, ok := collection[pair]
if !ok {
baseFps, _, err := i.Lookup(&pair)
if err != nil {
return nil, err
}
set = utility.Set{}
for _, baseFp := range baseFps {
set.Add(*baseFp)
}
collection[pair] = set
}
set.Add(fp)
}
}
batch := LabelPairFingerprintMapping{}
for pair, set := range collection {
fps := batch[pair]
for element := range set {
fp := element.(clientmodel.Fingerprint)
fps = append(fps, &fp)
}
batch[pair] = fps
}
return batch, nil
}
// IndexMetrics adds the facets of all unindexed metrics found in the given
// FingerprintMetricMapping to the corresponding indices.
func (i *TotalIndexer) IndexMetrics(b FingerprintMetricMapping) error {
unindexed, err := findUnindexed(i.MetricMembership, b)
if err != nil {
return err
}
labelNames, err := extendLabelNameToLabelValuesIndex(i.LabelNameToLabelValues, unindexed)
if err != nil {
return err
}
if err := i.LabelNameToLabelValues.IndexBatch(labelNames); err != nil {
return err
}
labelPairs, err := extendLabelPairIndex(i.LabelPairToFingerprint, unindexed)
if err != nil {
return err
}
if err := i.LabelPairToFingerprint.IndexBatch(labelPairs); err != nil {
return err
}
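// The membership index is updated last, presumably so that a crash before
// this point leaves the metrics still counted as unindexed; findUnindexed
// would then re-submit them on the next write instead of skipping them.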
if err := i.FingerprintToMetric.IndexBatch(unindexed); err != nil {
return err
}
return i.MetricMembership.IndexBatch(unindexed)
}


@ -1,25 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"testing"
"github.com/prometheus/prometheus/storage/metric"
)
func TestInterfaceAdherence(t *testing.T) {
var _ metric.Persistence = &LevelDBPersistence{}
var _ metric.Persistence = NewMemorySeriesStorage(MemorySeriesOptions{})
}


@ -1,677 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"flag"
"fmt"
"sync"
"time"
"code.google.com/p/goprotobuf/proto"
"github.com/golang/glog"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/storage/metric"
"github.com/prometheus/prometheus/storage/raw"
"github.com/prometheus/prometheus/storage/raw/leveldb"
"github.com/prometheus/prometheus/utility"
dto "github.com/prometheus/prometheus/model/generated"
)
const sortConcurrency = 2
// LevelDBPersistence is a leveldb-backed persistence layer for metrics.
type LevelDBPersistence struct {
CurationRemarks CurationRemarker
FingerprintToMetrics FingerprintMetricIndex
LabelNameToLabelValues LabelNameLabelValuesIndex
LabelPairToFingerprints LabelPairFingerprintIndex
MetricHighWatermarks HighWatermarker
MetricMembershipIndex MetricMembershipIndex
Indexer MetricIndexer
MetricSamples *leveldb.LevelDBPersistence
// The remaining indices will be replaced with generalized interface resolvers:
//
// type FingerprintResolver interface {
// GetFingerprintForMetric(clientmodel.Metric) (*clientmodel.Fingerprint, bool, error)
// GetFingerprintsForLabelMatchers(metric.LabelPair) (clientmodel.Fingerprints, bool, error)
// }
// type MetricResolver interface {
// GetMetricsForFingerprint(clientmodel.Fingerprints) (FingerprintMetricMapping, bool, error)
// }
}
var (
leveldbChunkSize = flag.Int("leveldbChunkSize", 200, "Maximum number of samples stored under one key.")
// These flag values are back-of-the-envelope estimates, though they seem
// sensible. Please re-evaluate them based on your own needs.
curationRemarksCacheSize = flag.Int("curationRemarksCacheSize", 5*1024*1024, "The size for the curation remarks cache (bytes).")
fingerprintsToLabelPairCacheSize = flag.Int("fingerprintsToLabelPairCacheSizeBytes", 25*1024*1024, "The size for the fingerprint to label pair index (bytes).")
highWatermarkCacheSize = flag.Int("highWatermarksByFingerprintSizeBytes", 5*1024*1024, "The size for the metric high watermarks (bytes).")
labelNameToLabelValuesCacheSize = flag.Int("labelNameToLabelValuesCacheSizeBytes", 25*1024*1024, "The size for the label name to label values index (bytes).")
labelPairToFingerprintsCacheSize = flag.Int("labelPairToFingerprintsCacheSizeBytes", 25*1024*1024, "The size for the label pair to metric fingerprint index (bytes).")
metricMembershipIndexCacheSize = flag.Int("metricMembershipCacheSizeBytes", 5*1024*1024, "The size for the metric membership index (bytes).")
samplesByFingerprintCacheSize = flag.Int("samplesByFingerprintCacheSizeBytes", 50*1024*1024, "The size for the samples database (bytes).")
)
type leveldbOpener func()
// Close closes all the underlying persistence layers. It implements the
// Persistence interface.
func (l *LevelDBPersistence) Close() {
var persistences = []raw.Database{
l.CurationRemarks,
l.FingerprintToMetrics,
l.LabelNameToLabelValues,
l.LabelPairToFingerprints,
l.MetricHighWatermarks,
l.MetricMembershipIndex,
l.MetricSamples,
}
closerGroup := sync.WaitGroup{}
for _, c := range persistences {
closerGroup.Add(1)
go func(c raw.Database) {
if c != nil {
if err := c.Close(); err != nil {
glog.Error("Error closing persistence: ", err)
}
}
closerGroup.Done()
}(c)
}
closerGroup.Wait()
}
// NewLevelDBPersistence returns a LevelDBPersistence object ready
// to use.
func NewLevelDBPersistence(baseDirectory string) (*LevelDBPersistence, error) {
workers := utility.NewUncertaintyGroup(7)
emission := &LevelDBPersistence{}
var subsystemOpeners = []struct {
name string
opener leveldbOpener
}{
{
"Label Names and Value Pairs by Fingerprint",
func() {
var err error
emission.FingerprintToMetrics, err = NewLevelDBFingerprintMetricIndex(
leveldb.LevelDBOptions{
Name: "Metrics by Fingerprint",
Purpose: "Index",
Path: baseDirectory + "/label_name_and_value_pairs_by_fingerprint",
CacheSizeBytes: *fingerprintsToLabelPairCacheSize,
},
)
workers.MayFail(err)
},
},
{
"Samples by Fingerprint",
func() {
var err error
emission.MetricSamples, err = leveldb.NewLevelDBPersistence(leveldb.LevelDBOptions{
Name: "Samples",
Purpose: "Timeseries",
Path: baseDirectory + "/samples_by_fingerprint",
CacheSizeBytes: *samplesByFingerprintCacheSize,
})
workers.MayFail(err)
},
},
{
"High Watermarks by Fingerprint",
func() {
var err error
emission.MetricHighWatermarks, err = NewLevelDBHighWatermarker(
leveldb.LevelDBOptions{
Name: "High Watermarks",
Purpose: "The youngest sample in the database per metric.",
Path: baseDirectory + "/high_watermarks_by_fingerprint",
CacheSizeBytes: *highWatermarkCacheSize,
},
)
workers.MayFail(err)
},
},
{
"Fingerprints by Label Name",
func() {
var err error
emission.LabelNameToLabelValues, err = NewLevelDBLabelNameLabelValuesIndex(
leveldb.LevelDBOptions{
Name: "Label Values by Label Name",
Purpose: "Index",
Path: baseDirectory + "/label_values_by_label_name",
CacheSizeBytes: *labelNameToLabelValuesCacheSize,
},
)
workers.MayFail(err)
},
},
{
"Fingerprints by Label Name and Value Pair",
func() {
var err error
emission.LabelPairToFingerprints, err = NewLevelDBLabelSetFingerprintIndex(
leveldb.LevelDBOptions{
Name: "Fingerprints by Label Pair",
Purpose: "Index",
Path: baseDirectory + "/fingerprints_by_label_name_and_value_pair",
CacheSizeBytes: *labelPairToFingerprintsCacheSize,
},
)
workers.MayFail(err)
},
},
{
"Metric Membership Index",
func() {
var err error
emission.MetricMembershipIndex, err = NewLevelDBMetricMembershipIndex(
leveldb.LevelDBOptions{
Name: "Metric Membership",
Purpose: "Index",
Path: baseDirectory + "/metric_membership_index",
CacheSizeBytes: *metricMembershipIndexCacheSize,
},
)
workers.MayFail(err)
},
},
{
"Sample Curation Remarks",
func() {
var err error
emission.CurationRemarks, err = NewLevelDBCurationRemarker(
leveldb.LevelDBOptions{
Name: "Sample Curation Remarks",
Purpose: "Ledger of Progress for Various Curators",
Path: baseDirectory + "/curation_remarks",
CacheSizeBytes: *curationRemarksCacheSize,
},
)
workers.MayFail(err)
},
},
}
for _, subsystem := range subsystemOpeners {
opener := subsystem.opener
go opener()
}
if !workers.Wait() {
for _, err := range workers.Errors() {
glog.Error("Could not open storage: ", err)
}
return nil, fmt.Errorf("unable to open metric persistence")
}
emission.Indexer = &TotalIndexer{
FingerprintToMetric: emission.FingerprintToMetrics,
LabelNameToLabelValues: emission.LabelNameToLabelValues,
LabelPairToFingerprint: emission.LabelPairToFingerprints,
MetricMembership: emission.MetricMembershipIndex,
}
return emission, nil
}
// AppendSample implements the Persistence interface.
func (l *LevelDBPersistence) AppendSample(sample *clientmodel.Sample) (err error) {
defer func(begin time.Time) {
recordOutcome(time.Since(begin), err, appendSample)
}(time.Now())
err = l.AppendSamples(clientmodel.Samples{sample})
return
}
// groupByFingerprint collects all of the provided samples and groups them
// together by their respective metric fingerprint.
func groupByFingerprint(samples clientmodel.Samples) map[clientmodel.Fingerprint]clientmodel.Samples {
fingerprintToSamples := map[clientmodel.Fingerprint]clientmodel.Samples{}
for _, sample := range samples {
fingerprint := &clientmodel.Fingerprint{}
fingerprint.LoadFromMetric(sample.Metric)
samples := fingerprintToSamples[*fingerprint]
samples = append(samples, sample)
fingerprintToSamples[*fingerprint] = samples
}
return fingerprintToSamples
}
func (l *LevelDBPersistence) refreshHighWatermarks(groups map[clientmodel.Fingerprint]clientmodel.Samples) (err error) {
defer func(begin time.Time) {
recordOutcome(time.Since(begin), err, refreshHighWatermarks)
}(time.Now())
b := FingerprintHighWatermarkMapping{}
for fp, ss := range groups {
if len(ss) == 0 {
continue
}
b[fp] = ss[len(ss)-1].Timestamp
}
return l.MetricHighWatermarks.UpdateBatch(b)
}
// AppendSamples appends the given Samples to the database and indexes them.
func (l *LevelDBPersistence) AppendSamples(samples clientmodel.Samples) (err error) {
defer func(begin time.Time) {
recordOutcome(time.Since(begin), err, appendSamples)
}(time.Now())
fingerprintToSamples := groupByFingerprint(samples)
indexErrChan := make(chan error, 1)
watermarkErrChan := make(chan error, 1)
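// Indexing and high-watermark refreshes run concurrently with the sample
// batching below; their buffered channels are drained after the commit, so a
// failure in either path surfaces as this method's error.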
go func(groups map[clientmodel.Fingerprint]clientmodel.Samples) {
metrics := FingerprintMetricMapping{}
for fingerprint, samples := range groups {
metrics[fingerprint] = samples[0].Metric
}
indexErrChan <- l.Indexer.IndexMetrics(metrics)
}(fingerprintToSamples)
go func(groups map[clientmodel.Fingerprint]clientmodel.Samples) {
watermarkErrChan <- l.refreshHighWatermarks(groups)
}(fingerprintToSamples)
samplesBatch := leveldb.NewBatch()
defer samplesBatch.Close()
key := &SampleKey{}
keyDto := &dto.SampleKey{}
values := make(metric.Values, 0, *leveldbChunkSize)
for fingerprint, group := range fingerprintToSamples {
for {
values := values[:0]
lengthOfGroup := len(group)
if lengthOfGroup == 0 {
break
}
take := *leveldbChunkSize
if lengthOfGroup < take {
take = lengthOfGroup
}
chunk := group[0:take]
group = group[take:lengthOfGroup]
key.Fingerprint = &fingerprint
key.FirstTimestamp = chunk[0].Timestamp
key.LastTimestamp = chunk[take-1].Timestamp
key.SampleCount = uint32(take)
key.Dump(keyDto)
for _, sample := range chunk {
values = append(values, metric.SamplePair{
Timestamp: sample.Timestamp,
Value: sample.Value,
})
}
val := marshalValues(values, nil)
samplesBatch.PutRaw(keyDto, val)
}
}
err = l.MetricSamples.Commit(samplesBatch)
if err != nil {
return
}
err = <-indexErrChan
if err != nil {
return
}
err = <-watermarkErrChan
if err != nil {
return
}
return
}
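
The inner loop above splits each fingerprint's sorted samples into chunks of at most *leveldbChunkSize entries, one LevelDB key per chunk. A standalone sketch of the same arithmetic with plain integers; chunkSizes is a hypothetical helper, and 200 is the flag's default:

// chunkSizes(450, 200) == []int{200, 200, 50}, matching the loop above.
func chunkSizes(n, chunkSize int) []int {
    var sizes []int
    for n > 0 {
        take := chunkSize
        if n < take {
            take = n
        }
        sizes = append(sizes, take)
        n -= take
    }
    return sizes
}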
func extractSampleKey(i leveldb.Iterator) (*SampleKey, error) {
k := &dto.SampleKey{}
if err := i.Key(k); err != nil {
return nil, err
}
key := &SampleKey{}
key.Load(k)
return key, nil
}
func (l *LevelDBPersistence) hasIndexMetric(m clientmodel.Metric) (value bool, err error) {
defer func(begin time.Time) {
recordOutcome(time.Since(begin), err, hasIndexMetric)
}(time.Now())
return l.MetricMembershipIndex.Has(m)
}
// GetFingerprintsForLabelMatchers returns the Fingerprints for the given
// LabelMatchers by querying the underlying LabelPairFingerprintIndex and
// possibly the LabelNameLabelValuesIndex for each matcher. It implements the
// Persistence interface.
func (l *LevelDBPersistence) GetFingerprintsForLabelMatchers(labelMatchers metric.LabelMatchers) (fps clientmodel.Fingerprints, err error) {
defer func(begin time.Time) {
recordOutcome(time.Since(begin), err, getFingerprintsForLabelMatchers)
}(time.Now())
sets := []utility.Set{}
for _, matcher := range labelMatchers {
set := utility.Set{}
switch matcher.Type {
case metric.Equal:
fps, _, err := l.LabelPairToFingerprints.Lookup(&metric.LabelPair{
Name: matcher.Name,
Value: matcher.Value,
})
if err != nil {
return nil, err
}
for _, fp := range fps {
set.Add(*fp)
}
default:
values, err := l.GetLabelValuesForLabelName(matcher.Name)
if err != nil {
return nil, err
}
matches := matcher.Filter(values)
if len(matches) == 0 {
return nil, nil
}
for _, v := range matches {
fps, _, err := l.LabelPairToFingerprints.Lookup(&metric.LabelPair{
Name: matcher.Name,
Value: v,
})
if err != nil {
return nil, err
}
for _, fp := range fps {
set.Add(*fp)
}
}
}
sets = append(sets, set)
}
numberOfSets := len(sets)
if numberOfSets == 0 {
return nil, nil
}
base := sets[0]
for i := 1; i < numberOfSets; i++ {
base = base.Intersection(sets[i])
}
for _, e := range base.Elements() {
fingerprint := e.(clientmodel.Fingerprint)
fps = append(fps, &fingerprint)
}
return fps, nil
}
// GetLabelValuesForLabelName returns the LabelValues for the given LabelName
// from the underlying LabelNameLabelValuesIndex. It implements the
// Persistence interface.
func (l *LevelDBPersistence) GetLabelValuesForLabelName(labelName clientmodel.LabelName) (clientmodel.LabelValues, error) {
var err error
defer func(begin time.Time) {
recordOutcome(time.Since(begin), err, getLabelValuesForLabelName)
}(time.Now())
values, _, err := l.LabelNameToLabelValues.Lookup(labelName)
return values, err
}
// GetMetricForFingerprint returns the Metric for the given Fingerprint from the
// underlying FingerprintMetricIndex. It implements the Persistence
// interface.
func (l *LevelDBPersistence) GetMetricForFingerprint(f *clientmodel.Fingerprint) (m clientmodel.Metric, err error) {
defer func(begin time.Time) {
recordOutcome(time.Since(begin), err, getMetricForFingerprint)
}(time.Now())
// TODO(matt): Update signature to work with ok.
m, _, err = l.FingerprintToMetrics.Lookup(f)
return m, err
}
// GetAllValuesForLabel gets all label values that are associated with the
// provided label name.
func (l *LevelDBPersistence) GetAllValuesForLabel(labelName clientmodel.LabelName) (values clientmodel.LabelValues, err error) {
filter := &LabelNameFilter{
labelName: labelName,
}
labelValuesOp := &CollectLabelValuesOp{}
_, err = l.LabelPairToFingerprints.ForEach(&MetricKeyDecoder{}, filter, labelValuesOp)
if err != nil {
return
}
values = labelValuesOp.labelValues
return
}
// Prune compacts each database's keyspace serially.
//
// Beware that it would probably be imprudent to run this on a live user-facing
// server due to latency implications.
func (l *LevelDBPersistence) Prune() {
l.CurationRemarks.Prune()
l.FingerprintToMetrics.Prune()
l.LabelNameToLabelValues.Prune()
l.LabelPairToFingerprints.Prune()
l.MetricHighWatermarks.Prune()
l.MetricMembershipIndex.Prune()
l.MetricSamples.Prune()
}
// Sizes returns the sum of all sizes of the underlying databases.
func (l *LevelDBPersistence) Sizes() (total uint64, err error) {
size := uint64(0)
if size, err = l.CurationRemarks.Size(); err != nil {
return 0, err
}
total += size
if size, err = l.FingerprintToMetrics.Size(); err != nil {
return 0, err
}
total += size
if size, err = l.LabelNameToLabelValues.Size(); err != nil {
return 0, err
}
total += size
if size, err = l.LabelPairToFingerprints.Size(); err != nil {
return 0, err
}
total += size
if size, err = l.MetricHighWatermarks.Size(); err != nil {
return 0, err
}
total += size
if size, err = l.MetricMembershipIndex.Size(); err != nil {
return 0, err
}
total += size
if size, err = l.MetricSamples.Size(); err != nil {
return 0, err
}
total += size
return total, nil
}
// States returns the DatabaseStates of all underlying databases.
func (l *LevelDBPersistence) States() raw.DatabaseStates {
return raw.DatabaseStates{
l.CurationRemarks.State(),
l.FingerprintToMetrics.State(),
l.LabelNameToLabelValues.State(),
l.LabelPairToFingerprints.State(),
l.MetricHighWatermarks.State(),
l.MetricMembershipIndex.State(),
l.MetricSamples.State(),
}
}
// CollectLabelValuesOp implements storage.RecordOperator. It collects the
// encountered LabelValues in a slice.
type CollectLabelValuesOp struct {
labelValues []clientmodel.LabelValue
}
// Operate implements storage.RecordOperator. 'key' is required to be a
// LabelPair. Its Value is appended to a slice of collected LabelValues.
func (op *CollectLabelValuesOp) Operate(key, value interface{}) (err *storage.OperatorError) {
labelPair := key.(metric.LabelPair)
op.labelValues = append(op.labelValues, labelPair.Value)
return
}
// MetricKeyDecoder implements storage.RecordDecoder for LabelPairs.
type MetricKeyDecoder struct{}
// DecodeKey implements storage.RecordDecoder. It requires 'in' to be a
// LabelPair protobuf. 'out' is a metric.LabelPair.
func (d *MetricKeyDecoder) DecodeKey(in interface{}) (out interface{}, err error) {
unmarshaled := dto.LabelPair{}
err = proto.Unmarshal(in.([]byte), &unmarshaled)
if err != nil {
return
}
out = metric.LabelPair{
Name: clientmodel.LabelName(*unmarshaled.Name),
Value: clientmodel.LabelValue(*unmarshaled.Value),
}
return
}
// DecodeValue implements storage.RecordDecoder. It is a no-op and always
// returns (nil, nil).
func (d *MetricKeyDecoder) DecodeValue(in interface{}) (out interface{}, err error) {
return
}
// MetricSamplesDecoder implements storage.RecordDecoder for SampleKeys.
type MetricSamplesDecoder struct{}
// DecodeKey implements storage.RecordDecoder. It requires 'in' to be a
// SampleKey protobuf. 'out' is a metric.SampleKey.
func (d *MetricSamplesDecoder) DecodeKey(in interface{}) (interface{}, error) {
key := &dto.SampleKey{}
err := proto.Unmarshal(in.([]byte), key)
if err != nil {
return nil, err
}
sampleKey := &SampleKey{}
sampleKey.Load(key)
return sampleKey, nil
}
// DecodeValue implements storage.RecordDecoder. It requires 'in' to be a
// SampleValueSeries protobuf. 'out' is of type metric.Values.
func (d *MetricSamplesDecoder) DecodeValue(in interface{}) (interface{}, error) {
return unmarshalValues(in.([]byte), nil), nil
}
// AcceptAllFilter implements storage.RecordFilter and accepts all records.
type AcceptAllFilter struct{}
// Filter implements storage.RecordFilter. It always returns ACCEPT.
func (d *AcceptAllFilter) Filter(_, _ interface{}) storage.FilterResult {
return storage.Accept
}
// LabelNameFilter implements storage.RecordFilter and filters records matching
// a LabelName.
type LabelNameFilter struct {
labelName clientmodel.LabelName
}
// Filter implements storage.RecordFilter. 'key' is expected to be a
// LabelPair. The result is ACCEPT if the Name of the LabelPair matches the
// LabelName of this LabelNameFilter.
func (f LabelNameFilter) Filter(key, value interface{}) (filterResult storage.FilterResult) {
labelPair, ok := key.(metric.LabelPair)
if ok && labelPair.Name == f.labelName {
return storage.Accept
}
return storage.Skip
}
func recordOutcome(duration time.Duration, err error, op string) {
labels := prometheus.Labels{operation: op}
if err == nil {
labels[result] = success
} else {
labels[result] = failure
}
storageLatency.With(labels).Observe(float64(duration / time.Microsecond))
}


@ -1,74 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"encoding/binary"
"math"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
)
const (
// sampleSize is the number of bytes per sample in marshalled format.
sampleSize = 16
// formatVersion is used as a version marker in the marshalled format.
formatVersion = 1
// formatVersionSize is the number of bytes used by the serialized formatVersion.
formatVersionSize = 1
)
// marshalValues marshals a group of samples for writing to disk into dest, or
// into a new slice if dest has insufficient capacity.
func marshalValues(v metric.Values, dest []byte) []byte {
sz := formatVersionSize + len(v)*sampleSize
if cap(dest) < sz {
dest = make([]byte, sz)
} else {
dest = dest[0:sz]
}
dest[0] = formatVersion
for i, val := range v {
offset := formatVersionSize + i*sampleSize
binary.LittleEndian.PutUint64(dest[offset:], uint64(val.Timestamp.Unix()))
binary.LittleEndian.PutUint64(dest[offset+8:], math.Float64bits(float64(val.Value)))
}
return dest
}
// unmarshalValues decodes marshalled samples into dest and returns either dest
// or a new slice containing those values if dest has insufficient capacity.
func unmarshalValues(buf []byte, dest metric.Values) metric.Values {
if buf[0] != formatVersion {
panic("unsupported format version")
}
n := (len(buf) - formatVersionSize) / sampleSize
if cap(dest) < n {
dest = make(metric.Values, n)
} else {
dest = dest[0:n]
}
for i := 0; i < n; i++ {
offset := formatVersionSize + i*sampleSize
dest[i].Timestamp = clientmodel.TimestampFromUnix(int64(binary.LittleEndian.Uint64(buf[offset:])))
dest[i].Value = clientmodel.SampleValue(math.Float64frombits(binary.LittleEndian.Uint64(buf[offset+8:])))
}
return dest
}
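
The on-disk layout is thus one version byte followed by 16 bytes per sample: a little-endian uint64 Unix timestamp and the little-endian IEEE-754 bits of the float64 value. A round-trip sketch of a single sample mirroring the two functions above; roundTripOne is a hypothetical helper:

func roundTripOne(tsUnix int64, v float64) (int64, float64) {
    buf := make([]byte, formatVersionSize+sampleSize)
    buf[0] = formatVersion
    binary.LittleEndian.PutUint64(buf[formatVersionSize:], uint64(tsUnix))
    binary.LittleEndian.PutUint64(buf[formatVersionSize+8:], math.Float64bits(v))
    ts := int64(binary.LittleEndian.Uint64(buf[formatVersionSize:]))
    val := math.Float64frombits(binary.LittleEndian.Uint64(buf[formatVersionSize+8:]))
    return ts, val // roundTripOne(1418838000, 42.5) == (1418838000, 42.5)
}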


@ -1,580 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"sort"
"sync"
"github.com/golang/glog"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
"github.com/prometheus/prometheus/utility"
)
// An initialSeriesArenaSize of 4*60 allows for one hour's worth of storage per
// metric without any major reallocations, assuming one sample every 15 seconds
// (a rate of 1/15 Hz): 4 samples per minute times 60 minutes yields 240 slots.
const initialSeriesArenaSize = 4 * 60
type stream interface {
add(metric.Values)
clone() metric.Values
getOlderThan(age clientmodel.Timestamp) metric.Values
evictOlderThan(age clientmodel.Timestamp)
size() int
clear()
metric() clientmodel.Metric
getValueAtTime(t clientmodel.Timestamp) metric.Values
getBoundaryValues(in metric.Interval) metric.Values
getRangeValues(in metric.Interval) metric.Values
}
type arrayStream struct {
sync.RWMutex
m clientmodel.Metric
values metric.Values
}
func (s *arrayStream) metric() clientmodel.Metric {
return s.m
}
// add implements the stream interface. This implementation requires both
// s.values and the passed-in v to be sorted already. Values in v that have a
// timestamp older than the most recent value in s.values are skipped.
func (s *arrayStream) add(v metric.Values) {
s.Lock()
defer s.Unlock()
// Skip over values that are older than the most recent value in s.
if len(s.values) > 0 {
i := 0
mostRecentTimestamp := s.values[len(s.values)-1].Timestamp
for ; i < len(v) && mostRecentTimestamp > v[i].Timestamp; i++ {
}
if i > 0 {
glog.Warningf(
"Skipped out-of-order values while adding to %#v: %#v",
s.m, v[:i],
)
v = v[i:]
}
}
s.values = append(s.values, v...)
}
func (s *arrayStream) clone() metric.Values {
s.RLock()
defer s.RUnlock()
clone := make(metric.Values, len(s.values))
copy(clone, s.values)
return clone
}
func (s *arrayStream) getOlderThan(t clientmodel.Timestamp) metric.Values {
s.RLock()
defer s.RUnlock()
finder := func(i int) bool {
return s.values[i].Timestamp.After(t)
}
i := sort.Search(len(s.values), finder)
return s.values[:i]
}
func (s *arrayStream) evictOlderThan(t clientmodel.Timestamp) {
s.Lock()
defer s.Unlock()
finder := func(i int) bool {
return s.values[i].Timestamp.After(t)
}
i := sort.Search(len(s.values), finder)
s.values = s.values[i:]
}
func (s *arrayStream) getValueAtTime(t clientmodel.Timestamp) metric.Values {
s.RLock()
defer s.RUnlock()
// BUG(all): There may be avenues for simplification here.
l := len(s.values)
switch l {
case 0:
return metric.Values{}
case 1:
return metric.Values{s.values[0]}
default:
index := sort.Search(l, func(i int) bool {
return !s.values[i].Timestamp.Before(t)
})
if index == 0 {
return metric.Values{s.values[0]}
}
if index == l {
return metric.Values{s.values[l-1]}
}
if s.values[index].Timestamp.Equal(t) {
return metric.Values{s.values[index]}
}
return metric.Values{s.values[index-1], s.values[index]}
}
}
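
getValueAtTime returns a single value on an exact timestamp hit or at either boundary, and the two neighboring values otherwise, so callers can interpolate. A plain-int sketch of the same sort.Search neighbor selection; neighbors is a hypothetical helper:

// neighbors([]int{10, 20, 30}, 15) == []int{10, 20}; an exact hit or an
// out-of-range query yields a single element, as in getValueAtTime.
func neighbors(times []int, t int) []int {
    if len(times) == 0 {
        return nil
    }
    i := sort.Search(len(times), func(j int) bool { return times[j] >= t })
    switch {
    case i == 0:
        return times[:1]
    case i == len(times):
        return times[len(times)-1:]
    case times[i] == t:
        return times[i : i+1]
    default:
        return times[i-1 : i+1]
    }
}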
func (s *arrayStream) getBoundaryValues(in metric.Interval) metric.Values {
s.RLock()
defer s.RUnlock()
oldest := sort.Search(len(s.values), func(i int) bool {
return !s.values[i].Timestamp.Before(in.OldestInclusive)
})
newest := sort.Search(len(s.values), func(i int) bool {
return s.values[i].Timestamp.After(in.NewestInclusive)
})
resultRange := s.values[oldest:newest]
switch len(resultRange) {
case 0:
return metric.Values{}
case 1:
return metric.Values{resultRange[0]}
default:
return metric.Values{resultRange[0], resultRange[len(resultRange)-1]}
}
}
func (s *arrayStream) getRangeValues(in metric.Interval) metric.Values {
s.RLock()
defer s.RUnlock()
oldest := sort.Search(len(s.values), func(i int) bool {
return !s.values[i].Timestamp.Before(in.OldestInclusive)
})
newest := sort.Search(len(s.values), func(i int) bool {
return s.values[i].Timestamp.After(in.NewestInclusive)
})
result := make(metric.Values, newest-oldest)
copy(result, s.values[oldest:newest])
return result
}
func (s *arrayStream) size() int {
return len(s.values)
}
func (s *arrayStream) clear() {
s.values = metric.Values{}
}
func newArrayStream(m clientmodel.Metric) *arrayStream {
return &arrayStream{
m: m,
values: make(metric.Values, 0, initialSeriesArenaSize),
}
}
type memorySeriesStorage struct {
sync.RWMutex
wmCache *watermarkCache
fingerprintToSeries map[clientmodel.Fingerprint]stream
labelPairToFingerprints map[metric.LabelPair]utility.Set
labelNameToLabelValues map[clientmodel.LabelName]utility.Set
}
// MemorySeriesOptions bundles options used by NewMemorySeriesStorage to create
// a memory series storage.
type MemorySeriesOptions struct {
// If provided, this WatermarkCache will be updated for any samples that
// are appended to the memorySeriesStorage.
WatermarkCache *watermarkCache
}
func (s *memorySeriesStorage) AppendSamples(samples clientmodel.Samples) error {
for _, sample := range samples {
s.AppendSample(sample)
}
return nil
}
func (s *memorySeriesStorage) AppendSample(sample *clientmodel.Sample) error {
s.Lock()
defer s.Unlock()
fingerprint := &clientmodel.Fingerprint{}
fingerprint.LoadFromMetric(sample.Metric)
series := s.getOrCreateSeries(sample.Metric, fingerprint)
series.add(metric.Values{
metric.SamplePair{
Value: sample.Value,
Timestamp: sample.Timestamp,
},
})
if s.wmCache != nil {
s.wmCache.Put(fingerprint, &watermarks{High: sample.Timestamp})
}
return nil
}
func (s *memorySeriesStorage) CreateEmptySeries(metric clientmodel.Metric) {
s.Lock()
defer s.Unlock()
m := clientmodel.Metric{}
for label, value := range metric {
m[label] = value
}
fingerprint := &clientmodel.Fingerprint{}
fingerprint.LoadFromMetric(m)
s.getOrCreateSeries(m, fingerprint)
}
func (s *memorySeriesStorage) getOrCreateSeries(m clientmodel.Metric, fp *clientmodel.Fingerprint) stream {
series, ok := s.fingerprintToSeries[*fp]
if !ok {
series = newArrayStream(m)
s.fingerprintToSeries[*fp] = series
for k, v := range m {
labelPair := metric.LabelPair{
Name: k,
Value: v,
}
fps, ok := s.labelPairToFingerprints[labelPair]
if !ok {
fps = utility.Set{}
s.labelPairToFingerprints[labelPair] = fps
}
fps.Add(*fp)
values, ok := s.labelNameToLabelValues[k]
if !ok {
values = utility.Set{}
s.labelNameToLabelValues[k] = values
}
values.Add(v)
}
}
return series
}
func (s *memorySeriesStorage) Flush(flushOlderThan clientmodel.Timestamp, queue chan<- clientmodel.Samples) {
s.RLock()
for _, stream := range s.fingerprintToSeries {
toArchive := stream.getOlderThan(flushOlderThan)
queued := make(clientmodel.Samples, 0, len(toArchive))
// NOTE: This duplication will go away soon.
for _, value := range toArchive {
queued = append(queued, &clientmodel.Sample{
Metric: stream.metric(),
Timestamp: value.Timestamp,
Value: value.Value,
})
}
// BUG(all): this can deadlock if the queue is full, as we only ever clear
// the queue after calling this method:
// https://github.com/prometheus/prometheus/issues/275
if len(queued) > 0 {
queue <- queued
}
}
s.RUnlock()
}
func (s *memorySeriesStorage) Evict(flushOlderThan clientmodel.Timestamp) {
emptySeries := []clientmodel.Fingerprint{}
s.RLock()
for fingerprint, stream := range s.fingerprintToSeries {
stream.evictOlderThan(flushOlderThan)
if stream.size() == 0 {
emptySeries = append(emptySeries, fingerprint)
}
}
s.RUnlock()
s.Lock()
for _, fingerprint := range emptySeries {
if series, ok := s.fingerprintToSeries[fingerprint]; ok && series.size() == 0 {
s.dropSeries(&fingerprint)
}
}
s.Unlock()
}
// Drop a label value from the label names to label values index.
func (s *memorySeriesStorage) dropLabelValue(l clientmodel.LabelName, v clientmodel.LabelValue) {
if set, ok := s.labelNameToLabelValues[l]; ok {
set.Remove(v)
if len(set) == 0 {
delete(s.labelNameToLabelValues, l)
}
}
}
// Drop all references to a series, including any samples.
func (s *memorySeriesStorage) dropSeries(fingerprint *clientmodel.Fingerprint) {
series, ok := s.fingerprintToSeries[*fingerprint]
if !ok {
return
}
for k, v := range series.metric() {
labelPair := metric.LabelPair{
Name: k,
Value: v,
}
if set, ok := s.labelPairToFingerprints[labelPair]; ok {
set.Remove(*fingerprint)
if len(set) == 0 {
delete(s.labelPairToFingerprints, labelPair)
s.dropLabelValue(k, v)
}
}
}
delete(s.fingerprintToSeries, *fingerprint)
}
// Append raw samples, bypassing indexing. Only used to add data to views,
// which don't need to look up by metric.
func (s *memorySeriesStorage) appendSamplesWithoutIndexing(fingerprint *clientmodel.Fingerprint, samples metric.Values) {
s.Lock()
defer s.Unlock()
series, ok := s.fingerprintToSeries[*fingerprint]
if !ok {
series = newArrayStream(clientmodel.Metric{})
s.fingerprintToSeries[*fingerprint] = series
}
series.add(samples)
}
func (s *memorySeriesStorage) GetFingerprintsForLabelMatchers(labelMatchers metric.LabelMatchers) (clientmodel.Fingerprints, error) {
s.RLock()
defer s.RUnlock()
sets := []utility.Set{}
for _, matcher := range labelMatchers {
switch matcher.Type {
case metric.Equal:
set, ok := s.labelPairToFingerprints[metric.LabelPair{
Name: matcher.Name,
Value: matcher.Value,
}]
if !ok {
return nil, nil
}
sets = append(sets, set)
default:
values, err := s.getLabelValuesForLabelName(matcher.Name)
if err != nil {
return nil, err
}
matches := matcher.Filter(values)
if len(matches) == 0 {
return nil, nil
}
set := utility.Set{}
for _, v := range matches {
subset, ok := s.labelPairToFingerprints[metric.LabelPair{
Name: matcher.Name,
Value: v,
}]
if !ok {
return nil, nil
}
for fp := range subset {
set.Add(fp)
}
}
sets = append(sets, set)
}
}
setCount := len(sets)
if setCount == 0 {
return nil, nil
}
base := sets[0]
for i := 1; i < setCount; i++ {
base = base.Intersection(sets[i])
}
fingerprints := clientmodel.Fingerprints{}
for _, e := range base.Elements() {
fingerprint := e.(clientmodel.Fingerprint)
fingerprints = append(fingerprints, &fingerprint)
}
return fingerprints, nil
}
func (s *memorySeriesStorage) GetLabelValuesForLabelName(labelName clientmodel.LabelName) (clientmodel.LabelValues, error) {
s.RLock()
defer s.RUnlock()
return s.getLabelValuesForLabelName(labelName)
}
func (s *memorySeriesStorage) getLabelValuesForLabelName(labelName clientmodel.LabelName) (clientmodel.LabelValues, error) {
set, ok := s.labelNameToLabelValues[labelName]
if !ok {
return nil, nil
}
values := make(clientmodel.LabelValues, 0, len(set))
for e := range set {
val := e.(clientmodel.LabelValue)
values = append(values, val)
}
return values, nil
}
func (s *memorySeriesStorage) GetMetricForFingerprint(f *clientmodel.Fingerprint) (clientmodel.Metric, error) {
s.RLock()
defer s.RUnlock()
series, ok := s.fingerprintToSeries[*f]
if !ok {
return nil, nil
}
metric := clientmodel.Metric{}
for label, value := range series.metric() {
metric[label] = value
}
return metric, nil
}
func (s *memorySeriesStorage) HasFingerprint(f *clientmodel.Fingerprint) bool {
s.RLock()
defer s.RUnlock()
_, has := s.fingerprintToSeries[*f]
return has
}
func (s *memorySeriesStorage) CloneSamples(f *clientmodel.Fingerprint) metric.Values {
s.RLock()
defer s.RUnlock()
series, ok := s.fingerprintToSeries[*f]
if !ok {
return nil
}
return series.clone()
}
func (s *memorySeriesStorage) GetValueAtTime(f *clientmodel.Fingerprint, t clientmodel.Timestamp) metric.Values {
s.RLock()
defer s.RUnlock()
series, ok := s.fingerprintToSeries[*f]
if !ok {
return nil
}
return series.getValueAtTime(t)
}
func (s *memorySeriesStorage) GetBoundaryValues(f *clientmodel.Fingerprint, i metric.Interval) metric.Values {
s.RLock()
defer s.RUnlock()
series, ok := s.fingerprintToSeries[*f]
if !ok {
return nil
}
return series.getBoundaryValues(i)
}
func (s *memorySeriesStorage) GetRangeValues(f *clientmodel.Fingerprint, i metric.Interval) metric.Values {
s.RLock()
defer s.RUnlock()
series, ok := s.fingerprintToSeries[*f]
if !ok {
return nil
}
return series.getRangeValues(i)
}
func (s *memorySeriesStorage) Close() {
s.Lock()
defer s.Unlock()
s.fingerprintToSeries = nil
s.labelPairToFingerprints = nil
s.labelNameToLabelValues = nil
}
func (s *memorySeriesStorage) GetAllValuesForLabel(labelName clientmodel.LabelName) (values clientmodel.LabelValues, err error) {
s.RLock()
defer s.RUnlock()
valueSet := map[clientmodel.LabelValue]bool{}
for _, series := range s.fingerprintToSeries {
if value, ok := series.metric()[labelName]; ok {
if !valueSet[value] {
values = append(values, value)
valueSet[value] = true
}
}
}
return
}
// NewMemorySeriesStorage returns a memory series storage ready to use.
func NewMemorySeriesStorage(o MemorySeriesOptions) *memorySeriesStorage {
return &memorySeriesStorage{
fingerprintToSeries: make(map[clientmodel.Fingerprint]stream),
labelPairToFingerprints: make(map[metric.LabelPair]utility.Set),
labelNameToLabelValues: make(map[clientmodel.LabelName]utility.Set),
wmCache: o.WatermarkCache,
}
}
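
A minimal end-to-end sketch of the memory storage, along the lines of the tests in the file that follows; memoryStorageExample is hypothetical and the metric name "testmetric" is illustrative:

func memoryStorageExample() (clientmodel.Fingerprints, error) {
    s := NewMemorySeriesStorage(MemorySeriesOptions{})
    if err := s.AppendSample(&clientmodel.Sample{
        Metric:    clientmodel.Metric{clientmodel.MetricNameLabel: "testmetric"},
        Value:     1,
        Timestamp: 0,
    }); err != nil {
        return nil, err
    }
    m, err := metric.NewLabelMatcher(metric.Equal, clientmodel.MetricNameLabel, "testmetric")
    if err != nil {
        return nil, err
    }
    return s.GetFingerprintsForLabelMatchers(metric.LabelMatchers{m})
}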


@ -1,284 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"fmt"
"reflect"
"runtime"
"sync"
"testing"
"time"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
)
func BenchmarkStreamAdd(b *testing.B) {
b.StopTimer()
s := newArrayStream(clientmodel.Metric{})
samples := make(metric.Values, 0, b.N)
for i := 0; i < b.N; i++ {
samples = append(samples, metric.SamplePair{
Timestamp: clientmodel.TimestampFromTime(time.Date(i, 0, 0, 0, 0, 0, 0, time.UTC)),
Value: clientmodel.SampleValue(i),
})
}
b.StartTimer()
s.add(samples)
}
func TestStreamAdd(t *testing.T) {
s := newArrayStream(clientmodel.Metric{})
// Add empty to empty.
v := metric.Values{}
expected := metric.Values{}
s.add(v)
if got := s.values; !reflect.DeepEqual(expected, got) {
t.Fatalf("Expected values %#v in stream, got %#v.", expected, got)
}
// Add something to empty.
v = metric.Values{
metric.SamplePair{Timestamp: 1, Value: -1},
}
expected = append(expected, v...)
s.add(v)
if got := s.values; !reflect.DeepEqual(expected, got) {
t.Fatalf("Expected values %#v in stream, got %#v.", expected, got)
}
// Add something to something.
v = metric.Values{
metric.SamplePair{Timestamp: 2, Value: -2},
metric.SamplePair{Timestamp: 5, Value: -5},
}
expected = append(expected, v...)
s.add(v)
if got := s.values; !reflect.DeepEqual(expected, got) {
t.Fatalf("Expected values %#v in stream, got %#v.", expected, got)
}
// Add something outdated to something.
v = metric.Values{
metric.SamplePair{Timestamp: 3, Value: -3},
metric.SamplePair{Timestamp: 4, Value: -4},
}
s.add(v)
if got := s.values; !reflect.DeepEqual(expected, got) {
t.Fatalf("Expected values %#v in stream, got %#v.", expected, got)
}
// Add something partially outdated to something.
v = metric.Values{
metric.SamplePair{Timestamp: 3, Value: -3},
metric.SamplePair{Timestamp: 6, Value: -6},
}
expected = append(expected, metric.SamplePair{Timestamp: 6, Value: -6})
s.add(v)
if got := s.values; !reflect.DeepEqual(expected, got) {
t.Fatalf("Expected values %#v in stream, got %#v.", expected, got)
}
}
func benchmarkAppendSamples(b *testing.B, labels int) {
b.StopTimer()
s := NewMemorySeriesStorage(MemorySeriesOptions{})
metric := clientmodel.Metric{}
for i := 0; i < labels; i++ {
metric[clientmodel.LabelName(fmt.Sprintf("label_%d", i))] = clientmodel.LabelValue(fmt.Sprintf("value_%d", i))
}
samples := make(clientmodel.Samples, 0, b.N)
for i := 0; i < b.N; i++ {
samples = append(samples, &clientmodel.Sample{
Metric: metric,
Value: clientmodel.SampleValue(i),
Timestamp: clientmodel.TimestampFromTime(time.Date(i, 0, 0, 0, 0, 0, 0, time.UTC)),
})
}
b.StartTimer()
for i := 0; i < b.N; i++ {
s.AppendSample(samples[i])
}
}
func BenchmarkAppendSample1(b *testing.B) {
benchmarkAppendSamples(b, 1)
}
func BenchmarkAppendSample10(b *testing.B) {
benchmarkAppendSamples(b, 10)
}
func BenchmarkAppendSample100(b *testing.B) {
benchmarkAppendSamples(b, 100)
}
func BenchmarkAppendSample1000(b *testing.B) {
benchmarkAppendSamples(b, 1000)
}
// Regression test for https://github.com/prometheus/prometheus/issues/381.
//
// 1. Creates samples for two timeseries with one common labelpair.
// 2. Flushes memory storage such that only one series is dropped from memory.
// 3. Gets fingerprints for common labelpair.
// 4. Checks that exactly one fingerprint remains.
func TestDroppedSeriesIndexRegression(t *testing.T) {
samples := clientmodel.Samples{
&clientmodel.Sample{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "testmetric",
"different": "differentvalue1",
"common": "samevalue",
},
Value: 1,
Timestamp: clientmodel.TimestampFromTime(time.Date(2000, 0, 0, 0, 0, 0, 0, time.UTC)),
},
&clientmodel.Sample{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "testmetric",
"different": "differentvalue2",
"common": "samevalue",
},
Value: 2,
Timestamp: clientmodel.TimestampFromTime(time.Date(2002, 0, 0, 0, 0, 0, 0, time.UTC)),
},
}
s := NewMemorySeriesStorage(MemorySeriesOptions{})
s.AppendSamples(samples)
common := clientmodel.LabelSet{"common": "samevalue"}
fps, err := s.GetFingerprintsForLabelMatchers(labelMatchersFromLabelSet(common))
if err != nil {
t.Fatal(err)
}
if len(fps) != 2 {
t.Fatalf("Got %d fingerprints, expected 2", len(fps))
}
toDisk := make(chan clientmodel.Samples, 2)
flushOlderThan := clientmodel.TimestampFromTime(time.Date(2001, 0, 0, 0, 0, 0, 0, time.UTC))
s.Flush(flushOlderThan, toDisk)
if len(toDisk) != 1 {
t.Fatalf("Got %d disk sample lists, expected 1", len(toDisk))
}
diskSamples := <-toDisk
if len(diskSamples) != 1 {
t.Fatalf("Got %d disk samples, expected 1", len(diskSamples))
}
s.Evict(flushOlderThan)
fps, err = s.GetFingerprintsForLabelMatchers(labelMatchersFromLabelSet(common))
if err != nil {
t.Fatal(err)
}
if len(fps) != 1 {
t.Fatalf("Got %d fingerprints, expected 1", len(fps))
}
}
func TestReaderWriterDeadlockRegression(t *testing.T) {
mp := runtime.GOMAXPROCS(2)
defer func(mp int) {
runtime.GOMAXPROCS(mp)
}(mp)
s := NewMemorySeriesStorage(MemorySeriesOptions{})
lms := metric.LabelMatchers{}
for i := 0; i < 100; i++ {
lm, err := metric.NewLabelMatcher(metric.NotEqual, clientmodel.MetricNameLabel, "testmetric")
if err != nil {
t.Fatal(err)
}
lms = append(lms, lm)
}
wg := sync.WaitGroup{}
wg.Add(2)
start := time.Now()
runDuration := 250 * time.Millisecond
writer := func() {
for time.Since(start) < runDuration {
s.AppendSamples(clientmodel.Samples{
&clientmodel.Sample{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "testmetric",
},
Value: 1,
Timestamp: 0,
},
})
}
wg.Done()
}
reader := func() {
for time.Since(start) < runDuration {
s.GetFingerprintsForLabelMatchers(lms)
}
wg.Done()
}
go reader()
go writer()
allDone := make(chan struct{})
go func() {
wg.Wait()
allDone <- struct{}{}
}()
select {
case <-allDone:
break
case <-time.NewTimer(5 * time.Second).C:
t.Fatalf("Deadlock timeout")
}
}
func BenchmarkGetFingerprintsForNotEqualMatcher1000(b *testing.B) {
numSeries := 1000
samples := make(clientmodel.Samples, 0, numSeries)
for i := 0; i < numSeries; i++ {
samples = append(samples, &clientmodel.Sample{
Metric: clientmodel.Metric{
clientmodel.MetricNameLabel: "testmetric",
"instance": clientmodel.LabelValue(fmt.Sprint("instance_", i)),
},
Value: 1,
Timestamp: clientmodel.TimestampFromTime(time.Date(2000, 0, 0, 0, 0, 0, 0, time.UTC)),
})
}
s := NewMemorySeriesStorage(MemorySeriesOptions{})
if err := s.AppendSamples(samples); err != nil {
b.Fatal(err)
}
m, err := metric.NewLabelMatcher(metric.NotEqual, "instance", "foo")
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
s.GetFingerprintsForLabelMatchers(metric.LabelMatchers{m})
}
}


@@ -1,212 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"fmt"
"github.com/golang/glog"
"github.com/prometheus/prometheus/storage/raw/leveldb"
dto "github.com/prometheus/prometheus/model/generated"
)
type iteratorSeekerState struct {
// Immutable State
i leveldb.Iterator
obj *SampleKey
first, last *SampleKey
dtoSampleKeys *dtoSampleKeyList
sampleKeys *sampleKeyList
// Mutable State
iteratorInvalid bool
seriesOperable bool
err error
key *SampleKey
keyDto *dto.SampleKey
}
// iteratorSeeker is a function that models a state machine state and
// is responsible for choosing the subsequent state given the present
// disposition.
//
// It returns the next state or nil if no remaining transition is possible.
// Errors and the outcome are not returned but recorded in the surrounding
// iteratorSeekerState: err holds any error that occurred, iteratorInvalid
// marks an unusable iterator, and seriesOperable indicates whether work may
// proceed with the current fingerprint.
type iteratorSeeker func() iteratorSeeker
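// A minimal driving-loop sketch (hypothetical, for illustration only — the
// real call sites live elsewhere in this package):
//
//	st := seekerState.initialize()
//	for st != nil {
//		st = st()
//	}
//	// seekerState.err, .iteratorInvalid, and .seriesOperable now carry
//	// the outcome.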
func (s *iteratorSeekerState) initialize() iteratorSeeker {
s.key, _ = s.sampleKeys.Get()
s.keyDto, _ = s.dtoSampleKeys.Get()
return s.start
}
func (s *iteratorSeekerState) destroy() iteratorSeeker {
s.sampleKeys.Give(s.key)
s.dtoSampleKeys.Give(s.keyDto)
return nil
}
func (s *iteratorSeekerState) start() iteratorSeeker {
switch {
case s.obj.Fingerprint.Less(s.first.Fingerprint):
// The fingerprint does not exist in the database.
return s.destroy
case s.last.Fingerprint.Less(s.obj.Fingerprint):
// The fingerprint does not exist in the database.
return s.destroy
case s.obj.Fingerprint.Equal(s.first.Fingerprint) && s.obj.FirstTimestamp.Before(s.first.FirstTimestamp):
// The fingerprint is the first fingerprint, but we've requested a value
// before what exists in the database.
return s.seekBeginning
case s.last.Before(s.obj.Fingerprint, s.obj.FirstTimestamp):
// The requested time for work is after the last sample in the database; we
// can't do anything!
return s.destroy
default:
return s.initialSeek
}
}
func (s *iteratorSeekerState) seekBeginning() iteratorSeeker {
s.i.SeekToFirst()
if !s.i.Valid() {
s.err = s.i.Error()
// If we can't seek to the beginning, there isn't any hope for us.
glog.Warningf("iterator went bad: %s", s.err)
s.iteratorInvalid = true
return s.destroy
}
return s.initialMatchFingerprint
}
func (s *iteratorSeekerState) initialSeek() iteratorSeeker {
s.obj.Dump(s.keyDto)
s.i.Seek(s.keyDto)
if !s.i.Valid() {
s.err = s.i.Error()
glog.Warningf("iterator went bad: %s", s.err)
s.iteratorInvalid = true
return s.destroy
}
return s.initialMatchFingerprint
}
func (s *iteratorSeekerState) initialMatchFingerprint() iteratorSeeker {
if err := s.i.Key(s.keyDto); err != nil {
s.err = err
return s.destroy
}
s.key.Load(s.keyDto)
switch {
case s.obj.Fingerprint.Less(s.key.Fingerprint):
return s.initialFingerprintOvershot
case s.key.Fingerprint.Less(s.obj.Fingerprint):
panic("violated invariant")
default:
return s.initialMatchTime
}
}
func (s *iteratorSeekerState) initialFingerprintOvershot() iteratorSeeker {
s.i.Previous()
if !s.i.Valid() {
glog.Warningf("Could not backtrack for %s", s)
panic("violated invariant")
}
if err := s.i.Key(s.keyDto); err != nil {
s.err = err
return s.destroy
}
s.key.Load(s.keyDto)
if !s.key.Fingerprint.Equal(s.obj.Fingerprint) {
return s.destroy
}
return s.initialMatchTime
}
func (s *iteratorSeekerState) initialMatchTime() iteratorSeeker {
switch {
case s.key.MayContain(s.obj.FirstTimestamp):
s.seriesOperable = true
return s.destroy
case s.key.Equal(s.first), s.obj.FirstTimestamp.Equal(s.key.FirstTimestamp):
s.seriesOperable = true
return s.destroy
case s.obj.FirstTimestamp.Before(s.key.FirstTimestamp):
return s.reCue
default:
panic("violated invariant " + fmt.Sprintln(s.obj, s.key))
}
}
func (s *iteratorSeekerState) reCue() iteratorSeeker {
s.i.Previous()
if !s.i.Valid() {
glog.Warningf("Could not backtrack for %s", s)
panic("violated invariant")
}
if err := s.i.Key(s.keyDto); err != nil {
s.err = err
return s.destroy
}
s.key.Load(s.keyDto)
if !s.key.Fingerprint.Equal(s.obj.Fingerprint) {
return s.fastForward
}
s.seriesOperable = true
return s.destroy
}
func (s *iteratorSeekerState) fastForward() iteratorSeeker {
s.i.Next()
if !s.i.Valid() {
glog.Warningf("Could not fast-forward for %s", s)
panic("violated invariant")
}
s.seriesOperable = true
return s.destroy
}


@@ -1,326 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"fmt"
"sort"
"time"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
)
// durationOperator encapsulates a general operation that occurs over a
// duration.
type durationOperator interface {
metric.Op
Through() clientmodel.Timestamp
}
// ops is a heap of operations, primary sorting key is the fingerprint.
type ops []metric.Op
// Len implements sort.Interface and heap.Interface.
func (o ops) Len() int {
return len(o)
}
// Less implements sort.Interface and heap.Interface. It compares the
// fingerprints. If they are equal, the comparison is delegated to
// currentTimeSort.
func (o ops) Less(i, j int) bool {
fpi := o[i].Fingerprint()
fpj := o[j].Fingerprint()
if fpi.Equal(fpj) {
return currentTimeSort{o}.Less(i, j)
}
return fpi.Less(fpj)
}
// Swap implements sort.Interface and heap.Interface.
func (o ops) Swap(i, j int) {
o[i], o[j] = o[j], o[i]
}
// Push implements heap.Interface.
func (o *ops) Push(x interface{}) {
// Push and Pop use pointer receivers because they modify the slice's
// length, not just its contents.
*o = append(*o, x.(metric.Op))
}
// Pop implements heap.Interface.
func (o *ops) Pop() interface{} {
old := *o
n := len(old)
x := old[n-1]
*o = old[0 : n-1]
return x
}
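// A usage sketch, assuming the standard container/heap package (illustrative
// only, not necessarily how this type is driven elsewhere):
//
//	h := &ops{}
//	heap.Init(h)
//	heap.Push(h, op) // op is any metric.Op
//	next := heap.Pop(h).(metric.Op)
//
// Because Less orders first by fingerprint and then by current time, Pop
// yields the op with the smallest fingerprint and earliest current time.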
// currentTimeSort is a wrapper for ops with customized sorting order.
type currentTimeSort struct {
ops
}
// currentTimeSort implements sort.Interface and sorts the operations in
// chronological order by their current time.
func (s currentTimeSort) Less(i, j int) bool {
return s.ops[i].CurrentTime().Before(s.ops[j].CurrentTime())
}
// baseOp contains the implementations and fields shared between different op
// types.
type baseOp struct {
fp clientmodel.Fingerprint
current clientmodel.Timestamp
}
func (g *baseOp) Fingerprint() *clientmodel.Fingerprint {
return &g.fp
}
func (g *baseOp) CurrentTime() clientmodel.Timestamp {
return g.current
}
// getValuesAtTimeOp encapsulates getting values at or adjacent to a specific
// time.
type getValuesAtTimeOp struct {
baseOp
consumed bool
}
func (g *getValuesAtTimeOp) String() string {
return fmt.Sprintf("getValuesAtTimeOp at %s", g.current)
}
func (g *getValuesAtTimeOp) ExtractSamples(in metric.Values) (out metric.Values) {
if len(in) == 0 {
return
}
out = extractValuesAroundTime(g.current, in)
g.consumed = true
return
}
func (g getValuesAtTimeOp) Consumed() bool {
return g.consumed
}
// getValuesAlongRangeOp encapsulates getting all values in a given range.
type getValuesAlongRangeOp struct {
baseOp
through clientmodel.Timestamp
}
func (g *getValuesAlongRangeOp) String() string {
return fmt.Sprintf("getValuesAlongRangeOp from %s through %s", g.current, g.through)
}
func (g *getValuesAlongRangeOp) Through() clientmodel.Timestamp {
return g.through
}
func (g *getValuesAlongRangeOp) ExtractSamples(in metric.Values) (out metric.Values) {
if len(in) == 0 {
return
}
// Find the first sample where time >= g.current.
firstIdx := sort.Search(len(in), func(i int) bool {
return !in[i].Timestamp.Before(g.current)
})
if firstIdx == len(in) {
// No samples at or after operator start time. This can only
// happen if we try applying the operator to a time after the
// last recorded sample. In this case, we're finished.
g.current = g.through.Add(clientmodel.MinimumTick)
return
}
// Find the first sample where time > g.through.
lastIdx := sort.Search(len(in), func(i int) bool {
return in[i].Timestamp.After(g.through)
})
if lastIdx == firstIdx {
g.current = g.through.Add(clientmodel.MinimumTick)
return
}
lastSampleTime := in[lastIdx-1].Timestamp
// Sample times are stored with a maximum time resolution of one second,
// so we have to add exactly that to target the next chunk on the next
// op iteration.
g.current = lastSampleTime.Add(time.Second)
return in[firstIdx:lastIdx]
}
func (g *getValuesAlongRangeOp) Consumed() bool {
return g.current.After(g.through)
}
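// A worked example of the index arithmetic above (illustrative): for in with
// timestamps [1m, 2m, 3m] and an op with current=0 and through=2m, firstIdx
// is 0 and lastIdx is 2, so ExtractSamples returns the samples at 1m and 2m
// and advances current to 2m plus one second, which marks the op consumed.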
// getValuesAtIntervalOp encapsulates getting values at a given interval over a
// duration.
type getValuesAtIntervalOp struct {
getValuesAlongRangeOp
interval time.Duration
}
func (g *getValuesAtIntervalOp) String() string {
return fmt.Sprintf("getValuesAtIntervalOp from %s each %s through %s", g.current, g.interval, g.through)
}
func (g *getValuesAtIntervalOp) ExtractSamples(in metric.Values) (out metric.Values) {
if len(in) == 0 {
return
}
lastChunkTime := in[len(in)-1].Timestamp
if g.current.After(lastChunkTime) {
g.current = g.through.Add(clientmodel.MinimumTick)
return metric.Values{in[len(in)-1]}
}
for len(in) > 0 {
out = append(out, extractValuesAroundTime(g.current, in)...)
if g.current.After(lastChunkTime) {
break
}
lastExtractedTime := out[len(out)-1].Timestamp
in = in.TruncateBefore(lastExtractedTime.Add(
clientmodel.MinimumTick))
g.current = g.current.Add(g.interval)
for !g.current.After(lastExtractedTime) {
g.current = g.current.Add(g.interval)
}
if lastExtractedTime.Equal(lastChunkTime) {
break
}
if g.current.After(g.through) {
break
}
}
return
}
// getValueRangeAtIntervalOp encapsulates getting all values from ranges along
// intervals.
//
// Works just like getValuesAlongRangeOp, but when current passes rangeThrough,
// rangeThrough is incremented by interval and current is reset to
// rangeThrough-rangeDuration. The op counts as consumed once current passes
// the overall through time.
type getValueRangeAtIntervalOp struct {
getValuesAtIntervalOp
rangeThrough clientmodel.Timestamp
rangeDuration time.Duration
}
func (g *getValueRangeAtIntervalOp) String() string {
return fmt.Sprintf("getValueRangeAtIntervalOp range %s from %s each %s through %s", g.rangeDuration, g.current, g.interval, g.through)
}
// Through panics because the notion of 'through' is ambiguous for this op.
func (g *getValueRangeAtIntervalOp) Through() clientmodel.Timestamp {
panic("not implemented")
}
func (g *getValueRangeAtIntervalOp) advanceToNextInterval() {
g.rangeThrough = g.rangeThrough.Add(g.interval)
g.current = g.rangeThrough.Add(-g.rangeDuration)
}
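// For example, with interval = 10m and rangeDuration = 2m (the values used
// in TestGetValueRangeAtIntervalOp elsewhere in this change), a window ending
// at rangeThrough = t advances to rangeThrough = t+10m with current = t+8m.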
func (g *getValueRangeAtIntervalOp) ExtractSamples(in metric.Values) (out metric.Values) {
if len(in) == 0 {
return
}
// Find the first sample where time >= g.current.
firstIdx := sort.Search(len(in), func(i int) bool {
return !in[i].Timestamp.Before(g.current)
})
if firstIdx == len(in) {
// No samples at or after operator start time. This can only
// happen if we try applying the operator to a time after the
// last recorded sample. In this case, we're finished.
g.current = g.through.Add(clientmodel.MinimumTick)
return
}
// Find the first sample where time > g.rangeThrough.
lastIdx := sort.Search(len(in), func(i int) bool {
return in[i].Timestamp.After(g.rangeThrough)
})
// This only happens when there is only one sample and it is both after
// g.current and after g.rangeThrough. In this case, both indexes are 0.
if lastIdx == firstIdx {
g.advanceToNextInterval()
return
}
lastSampleTime := in[lastIdx-1].Timestamp
// Sample times are stored with a maximum time resolution of one second,
// so we have to add exactly that to target the next chunk on the next
// op iteration.
g.current = lastSampleTime.Add(time.Second)
if g.current.After(g.rangeThrough) {
g.advanceToNextInterval()
}
return in[firstIdx:lastIdx]
}
// getValuesAtIntervalOps contains getValuesAtIntervalOp operations. It
// implements sort.Interface and sorts the operations in ascending order by
// their frequency.
type getValuesAtIntervalOps []*getValuesAtIntervalOp
func (s getValuesAtIntervalOps) Len() int {
return len(s)
}
func (s getValuesAtIntervalOps) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s getValuesAtIntervalOps) Less(i, j int) bool {
return s[i].interval < s[j].interval
}
// extractValuesAroundTime searches for the provided time in the list of
// available samples and emits a slice containing the data points that
// are adjacent to it.
//
// An assumption of this is that the provided samples are already sorted!
func extractValuesAroundTime(t clientmodel.Timestamp, in metric.Values) metric.Values {
i := sort.Search(len(in), func(i int) bool {
return !in[i].Timestamp.Before(t)
})
if i == len(in) {
// Target time is past the end, return only the last sample.
return in[len(in)-1:]
}
if in[i].Timestamp.Equal(t) && len(in) > i+1 {
// We hit exactly the current sample time. Very unlikely in
// practice. Return only the current sample.
return in[i : i+1]
}
if i == 0 {
// We hit before the first sample time. Return only the first
// sample.
return in[0:1]
}
// We hit between two samples. Return both surrounding samples.
return in[i-1 : i+1]
}
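// Worked examples (illustrative) for in with timestamps [10s, 20s]:
//
//	extractValuesAroundTime(15s, in) // between both: returns in[0:2]
//	extractValuesAroundTime(10s, in) // exact hit, not the last: returns in[0:1]
//	extractValuesAroundTime(20s, in) // exact hit on the last: returns in[0:2]
//	extractValuesAroundTime(25s, in) // past the end: returns in[1:2]
//	extractValuesAroundTime(5s, in)  // before the first: returns in[0:1]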


@@ -1,826 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"testing"
"time"
"github.com/prometheus/prometheus/storage/metric"
)
func TestGetValuesAtTimeOp(t *testing.T) {
var scenarios = []struct {
op getValuesAtTimeOp
in metric.Values
out metric.Values
}{
// No values.
{
op: getValuesAtTimeOp{
baseOp: baseOp{current: testInstant},
},
},
// Operator time before single value.
{
op: getValuesAtTimeOp{
baseOp: baseOp{current: testInstant},
},
in: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
},
out: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
},
},
// Operator time exactly at single value.
{
op: getValuesAtTimeOp{
baseOp: baseOp{current: testInstant.Add(1 * time.Minute)},
},
in: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
},
out: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
},
},
// Operator time after single value.
{
op: getValuesAtTimeOp{
baseOp: baseOp{current: testInstant.Add(2 * time.Minute)},
},
in: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
},
out: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
},
},
// Operator time before two values.
{
op: getValuesAtTimeOp{
baseOp: baseOp{current: testInstant},
},
in: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(2 * time.Minute),
Value: 1,
},
},
out: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
},
},
// Operator time at first of two values.
{
op: getValuesAtTimeOp{
baseOp: baseOp{current: testInstant.Add(1 * time.Minute)},
},
in: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(2 * time.Minute),
Value: 1,
},
},
out: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
},
},
// Operator time between first and second of two values.
{
op: getValuesAtTimeOp{
baseOp: baseOp{current: testInstant.Add(90 * time.Second)},
},
in: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(2 * time.Minute),
Value: 1,
},
},
out: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(2 * time.Minute),
Value: 1,
},
},
},
// Operator time at second of two values.
{
op: getValuesAtTimeOp{
baseOp: baseOp{current: testInstant.Add(2 * time.Minute)},
},
in: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(2 * time.Minute),
Value: 1,
},
},
out: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(2 * time.Minute),
Value: 1,
},
},
},
// Operator time after second of two values.
{
op: getValuesAtTimeOp{
baseOp: baseOp{current: testInstant.Add(3 * time.Minute)},
},
in: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(2 * time.Minute),
Value: 1,
},
},
out: metric.Values{
{
Timestamp: testInstant.Add(2 * time.Minute),
Value: 1,
},
},
},
}
for i, scenario := range scenarios {
actual := scenario.op.ExtractSamples(scenario.in)
if len(actual) != len(scenario.out) {
t.Fatalf("%d. expected length %d, got %d: %v", i, len(scenario.out), len(actual), scenario.op)
t.Fatalf("%d. expected length %d, got %d", i, len(scenario.out), len(actual))
}
for j, out := range scenario.out {
if !out.Equal(&actual[j]) {
t.Fatalf("%d. expected output %v, got %v", i, scenario.out, actual)
}
}
}
}
func TestGetValuesAtIntervalOp(t *testing.T) {
var scenarios = []struct {
op getValuesAtIntervalOp
in metric.Values
out metric.Values
}{
// No values.
{
op: getValuesAtIntervalOp{
getValuesAlongRangeOp: getValuesAlongRangeOp{
baseOp: baseOp{current: testInstant},
through: testInstant.Add(1 * time.Minute),
},
interval: 30 * time.Second,
},
},
// Entire operator range before first value.
{
op: getValuesAtIntervalOp{
getValuesAlongRangeOp: getValuesAlongRangeOp{
baseOp: baseOp{current: testInstant},
through: testInstant.Add(1 * time.Minute),
},
interval: 30 * time.Second,
},
in: metric.Values{
{
Timestamp: testInstant.Add(2 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(3 * time.Minute),
Value: 1,
},
},
out: metric.Values{
{
Timestamp: testInstant.Add(2 * time.Minute),
Value: 1,
},
},
},
// Operator range starts before first value, ends within available values.
{
op: getValuesAtIntervalOp{
getValuesAlongRangeOp: getValuesAlongRangeOp{
baseOp: baseOp{current: testInstant},
through: testInstant.Add(2 * time.Minute),
},
interval: 30 * time.Second,
},
in: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(3 * time.Minute),
Value: 1,
},
},
out: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(3 * time.Minute),
Value: 1,
},
},
},
// Entire operator range is within available values.
{
op: getValuesAtIntervalOp{
getValuesAlongRangeOp: getValuesAlongRangeOp{
baseOp: baseOp{current: testInstant.Add(1 * time.Minute)},
through: testInstant.Add(2 * time.Minute),
},
interval: 30 * time.Second,
},
in: metric.Values{
{
Timestamp: testInstant,
Value: 1,
},
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(3 * time.Minute),
Value: 1,
},
},
out: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(3 * time.Minute),
Value: 1,
},
},
},
// Operator range begins before first value, ends after last.
{
op: getValuesAtIntervalOp{
getValuesAlongRangeOp: getValuesAlongRangeOp{
baseOp: baseOp{current: testInstant},
through: testInstant.Add(3 * time.Minute),
},
interval: 30 * time.Second,
},
in: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(2 * time.Minute),
Value: 1,
},
},
out: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(2 * time.Minute),
Value: 1,
},
},
},
// Operator range begins within available values, ends after the last value.
{
op: getValuesAtIntervalOp{
getValuesAlongRangeOp: getValuesAlongRangeOp{
baseOp: baseOp{current: testInstant.Add(2 * time.Minute)},
through: testInstant.Add(4 * time.Minute),
},
interval: 30 * time.Second,
},
in: metric.Values{
{
Timestamp: testInstant,
Value: 1,
},
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(2 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(3 * time.Minute),
Value: 1,
},
},
out: metric.Values{
{
Timestamp: testInstant.Add(2 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(3 * time.Minute),
Value: 1,
},
},
},
// Entire operator range after the last available value.
{
op: getValuesAtIntervalOp{
getValuesAlongRangeOp: getValuesAlongRangeOp{
baseOp: baseOp{current: testInstant.Add(2 * time.Minute)},
through: testInstant.Add(3 * time.Minute),
},
interval: 30 * time.Second,
},
in: metric.Values{
{
Timestamp: testInstant,
Value: 1,
},
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
},
out: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
},
},
// Operator interval skips over several values and ends past the last
// available value. This is to verify that we still include the last value
// of a series even if we target a time past it and haven't extracted that
// value yet as part of a previous interval step (thus the necessity to
// skip over values for the test).
{
op: getValuesAtIntervalOp{
getValuesAlongRangeOp: getValuesAlongRangeOp{
baseOp: baseOp{current: testInstant.Add(30 * time.Second)},
through: testInstant.Add(4 * time.Minute),
},
interval: 3 * time.Minute,
},
in: metric.Values{
{
Timestamp: testInstant,
Value: 1,
},
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(2 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(3 * time.Minute),
Value: 1,
},
},
out: metric.Values{
{
Timestamp: testInstant,
Value: 1,
},
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(3 * time.Minute),
Value: 1,
},
},
},
}
for i, scenario := range scenarios {
actual := scenario.op.ExtractSamples(scenario.in)
if len(actual) != len(scenario.out) {
t.Fatalf("%d. expected length %d, got %d: %v", i, len(scenario.out), len(actual), actual)
}
if len(scenario.in) < 1 {
continue
}
lastExtractedTime := scenario.out[len(scenario.out)-1].Timestamp
if !scenario.op.Consumed() && scenario.op.CurrentTime().Before(lastExtractedTime) {
t.Fatalf("%d. expected op to be consumed or with CurrentTime() after current chunk, %v, %v", i, scenario.op.CurrentTime(), scenario.out)
}
for j, out := range scenario.out {
if !out.Equal(&actual[j]) {
t.Fatalf("%d. expected output %v, got %v", i, scenario.out, actual)
}
}
}
}
func TestGetValuesAlongRangeOp(t *testing.T) {
var scenarios = []struct {
op getValuesAlongRangeOp
in metric.Values
out metric.Values
}{
// No values.
{
op: getValuesAlongRangeOp{
baseOp: baseOp{current: testInstant},
through: testInstant.Add(1 * time.Minute),
},
},
// Entire operator range before first value.
{
op: getValuesAlongRangeOp{
baseOp: baseOp{current: testInstant},
through: testInstant.Add(1 * time.Minute),
},
in: metric.Values{
{
Timestamp: testInstant.Add(2 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(3 * time.Minute),
Value: 1,
},
},
out: metric.Values{},
},
// Operator range starts before first value, ends within available values.
{
op: getValuesAlongRangeOp{
baseOp: baseOp{current: testInstant},
through: testInstant.Add(2 * time.Minute),
},
in: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(3 * time.Minute),
Value: 1,
},
},
out: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
},
},
// Entire operator range is within available values.
{
op: getValuesAlongRangeOp{
baseOp: baseOp{current: testInstant.Add(1 * time.Minute)},
through: testInstant.Add(2 * time.Minute),
},
in: metric.Values{
{
Timestamp: testInstant,
Value: 1,
},
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(3 * time.Minute),
Value: 1,
},
},
out: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
},
},
// Operator range begins before first value, ends after last.
{
op: getValuesAlongRangeOp{
baseOp: baseOp{current: testInstant},
through: testInstant.Add(3 * time.Minute),
},
in: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(2 * time.Minute),
Value: 1,
},
},
out: metric.Values{
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(2 * time.Minute),
Value: 1,
},
},
},
// Operator range begins within available values, ends after the last value.
{
op: getValuesAlongRangeOp{
baseOp: baseOp{current: testInstant.Add(2 * time.Minute)},
through: testInstant.Add(4 * time.Minute),
},
in: metric.Values{
{
Timestamp: testInstant,
Value: 1,
},
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(2 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(3 * time.Minute),
Value: 1,
},
},
out: metric.Values{
{
Timestamp: testInstant.Add(2 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(3 * time.Minute),
Value: 1,
},
},
},
// Entire operator range after the last available value.
{
op: getValuesAlongRangeOp{
baseOp: baseOp{current: testInstant.Add(2 * time.Minute)},
through: testInstant.Add(3 * time.Minute),
},
in: metric.Values{
{
Timestamp: testInstant,
Value: 1,
},
{
Timestamp: testInstant.Add(1 * time.Minute),
Value: 1,
},
},
out: metric.Values{},
},
}
for i, scenario := range scenarios {
actual := scenario.op.ExtractSamples(scenario.in)
if len(actual) != len(scenario.out) {
t.Fatalf("%d. expected length %d, got %d: %v", i, len(scenario.out), len(actual), actual)
}
for j, out := range scenario.out {
if !out.Equal(&actual[j]) {
t.Fatalf("%d. expected output %v, got %v", i, scenario.out, actual)
}
}
}
}
func TestGetValueRangeAtIntervalOp(t *testing.T) {
testOp := getValueRangeAtIntervalOp{
getValuesAtIntervalOp: getValuesAtIntervalOp{
getValuesAlongRangeOp: getValuesAlongRangeOp{
baseOp: baseOp{current: testInstant.Add(-2 * time.Minute)},
through: testInstant.Add(20 * time.Minute),
},
interval: 10 * time.Minute,
},
rangeThrough: testInstant,
rangeDuration: 2 * time.Minute,
}
var scenarios = []struct {
op getValueRangeAtIntervalOp
in metric.Values
out metric.Values
}{
// All values before the first range.
{
op: testOp,
in: metric.Values{
{
Timestamp: testInstant.Add(-4 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(-3 * time.Minute),
Value: 2,
},
},
out: metric.Values{},
},
// metric.Values starting before first range, ending after last.
{
op: testOp,
in: metric.Values{
{
Timestamp: testInstant.Add(-4 * time.Minute),
Value: 1,
},
{
Timestamp: testInstant.Add(-3 * time.Minute),
Value: 2,
},
{
Timestamp: testInstant.Add(-2 * time.Minute),
Value: 3,
},
{
Timestamp: testInstant.Add(-1 * time.Minute),
Value: 4,
},
{
Timestamp: testInstant.Add(0 * time.Minute),
Value: 5,
},
{
Timestamp: testInstant.Add(5 * time.Minute),
Value: 6,
},
{
Timestamp: testInstant.Add(8 * time.Minute),
Value: 7,
},
{
Timestamp: testInstant.Add(9 * time.Minute),
Value: 8,
},
{
Timestamp: testInstant.Add(10 * time.Minute),
Value: 9,
},
{
Timestamp: testInstant.Add(15 * time.Minute),
Value: 10,
},
{
Timestamp: testInstant.Add(18 * time.Minute),
Value: 11,
},
{
Timestamp: testInstant.Add(19 * time.Minute),
Value: 12,
},
{
Timestamp: testInstant.Add(20 * time.Minute),
Value: 13,
},
{
Timestamp: testInstant.Add(21 * time.Minute),
Value: 14,
},
},
out: metric.Values{
{
Timestamp: testInstant.Add(-2 * time.Minute),
Value: 3,
},
{
Timestamp: testInstant.Add(-1 * time.Minute),
Value: 4,
},
{
Timestamp: testInstant.Add(0 * time.Minute),
Value: 5,
},
{
Timestamp: testInstant.Add(8 * time.Minute),
Value: 7,
},
{
Timestamp: testInstant.Add(9 * time.Minute),
Value: 8,
},
{
Timestamp: testInstant.Add(10 * time.Minute),
Value: 9,
},
{
Timestamp: testInstant.Add(18 * time.Minute),
Value: 11,
},
{
Timestamp: testInstant.Add(19 * time.Minute),
Value: 12,
},
{
Timestamp: testInstant.Add(20 * time.Minute),
Value: 13,
},
},
},
// metric.Values starting after last range.
{
op: testOp,
in: metric.Values{
{
Timestamp: testInstant.Add(21 * time.Minute),
Value: 14,
},
},
out: metric.Values{},
},
}
for i, scenario := range scenarios {
actual := metric.Values{}
for !scenario.op.Consumed() {
actual = append(actual, scenario.op.ExtractSamples(scenario.in)...)
}
if len(actual) != len(scenario.out) {
t.Fatalf("%d. expected length %d, got %d: %v", i, len(scenario.out), len(actual), actual)
}
for j, out := range scenario.out {
if !out.Equal(&actual[j]) {
t.Fatalf("%d. expected output %v, got %v", i, scenario.out, actual)
}
}
}
}


@@ -1,449 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"fmt"
"code.google.com/p/goprotobuf/proto"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
"github.com/prometheus/prometheus/storage/raw"
"github.com/prometheus/prometheus/storage/raw/leveldb"
dto "github.com/prometheus/prometheus/model/generated"
)
// Processor models a post-processing agent that performs work given a sample
// corpus.
type Processor interface {
// Name emits the name of this processor's signature encoder. It must
// be fully-qualified in the sense that it could be used via a Protocol
// Buffer registry to extract the descriptor to reassemble this message.
Name() string
// Signature emits a byte signature for this process for the purpose of
// remarking how far along it has been applied to the database.
Signature() []byte
// Apply runs this processor against the sample set. sampleIterator
// expects to be pre-seeked to the initial starting position. The
// processor will run up until stopAt has been reached. It is
// imperative that the provided stopAt is within the interval of the
// series frontier.
//
// Upon completion or error, the last time at which the processor
// finished shall be emitted in addition to any errors.
Apply(sampleIterator leveldb.Iterator, samplesPersistence raw.Persistence, stopAt clientmodel.Timestamp, fingerprint *clientmodel.Fingerprint) (lastCurated clientmodel.Timestamp, err error)
// Close reaps all of the underlying system resources associated with
// this processor.
Close()
}
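// A minimal no-op sketch of the contract (hypothetical, for illustration
// only):
//
//	type noopProcessor struct{}
//
//	func (noopProcessor) Name() string      { return "io.prometheus.NoopProcessorDefinition" }
//	func (noopProcessor) Signature() []byte { return nil }
//	func (noopProcessor) Close()            {}
//
//	func (noopProcessor) Apply(
//		i leveldb.Iterator, p raw.Persistence,
//		stopAt clientmodel.Timestamp, fp *clientmodel.Fingerprint,
//	) (clientmodel.Timestamp, error) {
//		// Claim everything up to stopAt has been curated without
//		// touching the persistence.
//		return stopAt, nil
//	}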
// CompactionProcessor combines sparse values in the database together such that
// at least MinimumGroupSize-sized chunks are grouped together. It implements
// the Processor interface.
type CompactionProcessor struct {
maximumMutationPoolBatch int
minimumGroupSize int
// signature is the byte representation of the CompactionProcessor's
// settings, used for purely memoization purposes across an instance.
signature []byte
dtoSampleKeys *dtoSampleKeyList
sampleKeys *sampleKeyList
}
// Name implements the Processor interface. It returns
// "io.prometheus.CompactionProcessorDefinition".
func (p *CompactionProcessor) Name() string {
return "io.prometheus.CompactionProcessorDefinition"
}
// Signature implements the Processor interface.
func (p *CompactionProcessor) Signature() []byte {
if len(p.signature) == 0 {
out, err := proto.Marshal(&dto.CompactionProcessorDefinition{
MinimumGroupSize: proto.Uint32(uint32(p.minimumGroupSize)),
})
if err != nil {
panic(err)
}
p.signature = out
}
return p.signature
}
func (p *CompactionProcessor) String() string {
return fmt.Sprintf("compactionProcessor for minimum group size %d", p.minimumGroupSize)
}
// Apply implements the Processor interface.
func (p *CompactionProcessor) Apply(sampleIterator leveldb.Iterator, samplesPersistence raw.Persistence, stopAt clientmodel.Timestamp, fingerprint *clientmodel.Fingerprint) (lastCurated clientmodel.Timestamp, err error) {
var pendingBatch raw.Batch
defer func() {
if pendingBatch != nil {
pendingBatch.Close()
}
}()
var pendingMutations = 0
var pendingSamples metric.Values
var unactedSamples metric.Values
var lastTouchedTime clientmodel.Timestamp
var keyDropped bool
sampleKey, _ := p.sampleKeys.Get()
defer p.sampleKeys.Give(sampleKey)
sampleKeyDto, _ := p.dtoSampleKeys.Get()
defer p.dtoSampleKeys.Give(sampleKeyDto)
if err = sampleIterator.Key(sampleKeyDto); err != nil {
return
}
sampleKey.Load(sampleKeyDto)
unactedSamples = unmarshalValues(sampleIterator.RawValue(), nil)
for lastCurated.Before(stopAt) && lastTouchedTime.Before(stopAt) && sampleKey.Fingerprint.Equal(fingerprint) {
switch {
// Furnish a new pending batch operation if none is available.
case pendingBatch == nil:
pendingBatch = leveldb.NewBatch()
// If there are no sample values to extract from the datastore, let's
// continue extracting more values to use. We know that the time.Before()
// block would prevent us from going into unsafe territory.
case len(unactedSamples) == 0:
if !sampleIterator.Next() {
return lastCurated, fmt.Errorf("illegal condition: invalid iterator on continuation")
}
keyDropped = false
if err = sampleIterator.Key(sampleKeyDto); err != nil {
return
}
sampleKey.Load(sampleKeyDto)
if !sampleKey.Fingerprint.Equal(fingerprint) {
break
}
unactedSamples = unmarshalValues(sampleIterator.RawValue(), nil)
// If the number of pending mutations exceeds the allowed batch amount,
// commit to disk and delete the batch. A new one will be recreated if
// necessary.
case pendingMutations >= p.maximumMutationPoolBatch:
err = samplesPersistence.Commit(pendingBatch)
if err != nil {
return
}
pendingMutations = 0
pendingBatch.Close()
pendingBatch = nil
case len(pendingSamples) == 0 && len(unactedSamples) >= p.minimumGroupSize:
lastTouchedTime = unactedSamples[len(unactedSamples)-1].Timestamp
unactedSamples = metric.Values{}
case len(pendingSamples)+len(unactedSamples) < p.minimumGroupSize:
if !keyDropped {
k := &dto.SampleKey{}
sampleKey.Dump(k)
pendingBatch.Drop(k)
keyDropped = true
}
pendingSamples = append(pendingSamples, unactedSamples...)
lastTouchedTime = unactedSamples[len(unactedSamples)-1].Timestamp
unactedSamples = metric.Values{}
pendingMutations++
// If the number of pending writes equals the target group size
case len(pendingSamples) == p.minimumGroupSize:
k := &dto.SampleKey{}
newSampleKey := buildSampleKey(fingerprint, pendingSamples)
newSampleKey.Dump(k)
b := marshalValues(pendingSamples, nil)
pendingBatch.PutRaw(k, b)
pendingMutations++
lastCurated = newSampleKey.FirstTimestamp
if len(unactedSamples) > 0 {
if !keyDropped {
sampleKey.Dump(k)
pendingBatch.Drop(k)
keyDropped = true
}
if len(unactedSamples) > p.minimumGroupSize {
pendingSamples = unactedSamples[:p.minimumGroupSize]
unactedSamples = unactedSamples[p.minimumGroupSize:]
lastTouchedTime = unactedSamples[len(unactedSamples)-1].Timestamp
} else {
pendingSamples = unactedSamples
lastTouchedTime = pendingSamples[len(pendingSamples)-1].Timestamp
unactedSamples = metric.Values{}
}
}
case len(pendingSamples)+len(unactedSamples) >= p.minimumGroupSize:
if !keyDropped {
k := &dto.SampleKey{}
sampleKey.Dump(k)
pendingBatch.Drop(k)
keyDropped = true
}
remainder := p.minimumGroupSize - len(pendingSamples)
pendingSamples = append(pendingSamples, unactedSamples[:remainder]...)
unactedSamples = unactedSamples[remainder:]
if len(unactedSamples) == 0 {
lastTouchedTime = pendingSamples[len(pendingSamples)-1].Timestamp
} else {
lastTouchedTime = unactedSamples[len(unactedSamples)-1].Timestamp
}
pendingMutations++
default:
err = fmt.Errorf("unhandled processing case")
}
}
if len(unactedSamples) > 0 || len(pendingSamples) > 0 {
pendingSamples = append(pendingSamples, unactedSamples...)
k := &dto.SampleKey{}
newSampleKey := buildSampleKey(fingerprint, pendingSamples)
newSampleKey.Dump(k)
b := marshalValues(pendingSamples, nil)
pendingBatch.PutRaw(k, b)
pendingSamples = metric.Values{}
pendingMutations++
lastCurated = newSampleKey.FirstTimestamp
}
// This is not deferred due to the off-chance that a pre-existing commit
// failed.
if pendingBatch != nil && pendingMutations > 0 {
err = samplesPersistence.Commit(pendingBatch)
if err != nil {
return
}
}
return
}
// Close implements the Processor interface.
func (p *CompactionProcessor) Close() {
p.dtoSampleKeys.Close()
p.sampleKeys.Close()
}
// CompactionProcessorOptions are used for construction of a
// CompactionProcessor.
type CompactionProcessorOptions struct {
// MaximumMutationPoolBatch represents approximately the largest pending
// batch of mutation operations for the database before pausing to
// commit before resumption.
//
// A reasonable value would be (MinimumGroupSize * 2) + 1.
MaximumMutationPoolBatch int
// MinimumGroupSize represents the smallest allowed sample chunk size in the
// database.
MinimumGroupSize int
}
// NewCompactionProcessor returns a CompactionProcessor ready to use.
func NewCompactionProcessor(o *CompactionProcessorOptions) *CompactionProcessor {
return &CompactionProcessor{
maximumMutationPoolBatch: o.MaximumMutationPoolBatch,
minimumGroupSize: o.MinimumGroupSize,
dtoSampleKeys: newDtoSampleKeyList(10),
sampleKeys: newSampleKeyList(10),
}
}
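// A construction sketch following the MaximumMutationPoolBatch guidance
// above (values are illustrative only):
//
//	p := NewCompactionProcessor(&CompactionProcessorOptions{
//		MinimumGroupSize:         50,
//		MaximumMutationPoolBatch: 50*2 + 1,
//	})
//	defer p.Close()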
// DeletionProcessor deletes sample blocks older than a defined value. It
// implements the Processor interface.
type DeletionProcessor struct {
maximumMutationPoolBatch int
// signature is the byte representation of the DeletionProcessor's settings,
// used for purely memoization purposes across an instance.
signature []byte
dtoSampleKeys *dtoSampleKeyList
sampleKeys *sampleKeyList
}
// Name implements the Processor interface. It returns
// "io.prometheus.DeletionProcessorDefinition".
func (p *DeletionProcessor) Name() string {
return "io.prometheus.DeletionProcessorDefinition"
}
// Signature implements the Processor interface.
func (p *DeletionProcessor) Signature() []byte {
if len(p.signature) == 0 {
out, err := proto.Marshal(&dto.DeletionProcessorDefinition{})
if err != nil {
panic(err)
}
p.signature = out
}
return p.signature
}
func (p *DeletionProcessor) String() string {
return "deletionProcessor"
}
// Apply implements the Processor interface.
func (p *DeletionProcessor) Apply(sampleIterator leveldb.Iterator, samplesPersistence raw.Persistence, stopAt clientmodel.Timestamp, fingerprint *clientmodel.Fingerprint) (lastCurated clientmodel.Timestamp, err error) {
var pendingBatch raw.Batch
defer func() {
if pendingBatch != nil {
pendingBatch.Close()
}
}()
sampleKeyDto, _ := p.dtoSampleKeys.Get()
defer p.dtoSampleKeys.Give(sampleKeyDto)
sampleKey, _ := p.sampleKeys.Get()
defer p.sampleKeys.Give(sampleKey)
if err = sampleIterator.Key(sampleKeyDto); err != nil {
return
}
sampleKey.Load(sampleKeyDto)
sampleValues := unmarshalValues(sampleIterator.RawValue(), nil)
pendingMutations := 0
for lastCurated.Before(stopAt) && sampleKey.Fingerprint.Equal(fingerprint) {
switch {
// Furnish a new pending batch operation if none is available.
case pendingBatch == nil:
pendingBatch = leveldb.NewBatch()
// If there are no sample values to extract from the datastore,
// let's continue extracting more values to use. We know that
// the time.Before() block would prevent us from going into
// unsafe territory.
case len(sampleValues) == 0:
if !sampleIterator.Next() {
return lastCurated, fmt.Errorf("illegal condition: invalid iterator on continuation")
}
if err = sampleIterator.Key(sampleKeyDto); err != nil {
return
}
sampleKey.Load(sampleKeyDto)
sampleValues = unmarshalValues(sampleIterator.RawValue(), nil)
// If the number of pending mutations exceeds the allowed batch
// amount, commit to disk and delete the batch. A new one will
// be recreated if necessary.
case pendingMutations >= p.maximumMutationPoolBatch:
err = samplesPersistence.Commit(pendingBatch)
if err != nil {
return
}
pendingMutations = 0
pendingBatch.Close()
pendingBatch = nil
case !sampleKey.MayContain(stopAt):
k := &dto.SampleKey{}
sampleKey.Dump(k)
pendingBatch.Drop(k)
lastCurated = sampleKey.LastTimestamp
sampleValues = metric.Values{}
pendingMutations++
case sampleKey.MayContain(stopAt):
k := &dto.SampleKey{}
sampleKey.Dump(k)
pendingBatch.Drop(k)
pendingMutations++
sampleValues = sampleValues.TruncateBefore(stopAt)
if len(sampleValues) > 0 {
k := &dto.SampleKey{}
sampleKey = buildSampleKey(fingerprint, sampleValues)
sampleKey.Dump(k)
lastCurated = sampleKey.FirstTimestamp
v := marshalValues(sampleValues, nil)
pendingBatch.PutRaw(k, v)
pendingMutations++
} else {
lastCurated = sampleKey.LastTimestamp
}
default:
err = fmt.Errorf("unhandled processing case")
}
}
// This is not deferred due to the off-chance that a pre-existing commit
// failed.
if pendingBatch != nil && pendingMutations > 0 {
err = samplesPersistence.Commit(pendingBatch)
if err != nil {
return
}
}
return
}
// Close implements the Processor interface.
func (p *DeletionProcessor) Close() {
p.dtoSampleKeys.Close()
p.sampleKeys.Close()
}
// DeletionProcessorOptions are used for construction of a DeletionProcessor.
type DeletionProcessorOptions struct {
// MaximumMutationPoolBatch represents approximately the largest pending
// batch of mutation operations for the database before pausing to
// commit before resumption.
MaximumMutationPoolBatch int
}
// NewDeletionProcessor returns a DeletionProcessor ready to use.
func NewDeletionProcessor(o *DeletionProcessorOptions) *DeletionProcessor {
return &DeletionProcessor{
maximumMutationPoolBatch: o.MaximumMutationPoolBatch,
dtoSampleKeys: newDtoSampleKeyList(10),
sampleKeys: newSampleKeyList(10),
}
}
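// As above, a construction sketch (illustrative value only):
//
//	d := NewDeletionProcessor(&DeletionProcessorOptions{
//		MaximumMutationPoolBatch: 101,
//	})
//	defer d.Close()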

File diff suppressed because it is too large


@@ -1,86 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"testing"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
)
func GetFingerprintsForLabelSetUsesAndForLabelMatchingTests(p metric.Persistence, t testing.TB) {
metrics := []clientmodel.LabelSet{
{clientmodel.MetricNameLabel: "request_metrics_latency_equal_tallying_microseconds", "instance": "http://localhost:9090/metrics.json", "percentile": "0.010000"},
{clientmodel.MetricNameLabel: "requests_metrics_latency_equal_accumulating_microseconds", "instance": "http://localhost:9090/metrics.json", "percentile": "0.010000"},
{clientmodel.MetricNameLabel: "requests_metrics_latency_logarithmic_accumulating_microseconds", "instance": "http://localhost:9090/metrics.json", "percentile": "0.010000"},
{clientmodel.MetricNameLabel: "requests_metrics_latency_logarithmic_tallying_microseconds", "instance": "http://localhost:9090/metrics.json", "percentile": "0.010000"},
{clientmodel.MetricNameLabel: "targets_healthy_scrape_latency_ms", "instance": "http://localhost:9090/metrics.json", "percentile": "0.010000"},
}
for _, metric := range metrics {
m := clientmodel.Metric{}
for k, v := range metric {
m[clientmodel.LabelName(k)] = clientmodel.LabelValue(v)
}
testAppendSamples(p, &clientmodel.Sample{
Value: clientmodel.SampleValue(0.0),
Timestamp: clientmodel.Now(),
Metric: m,
}, t)
}
labelSet := clientmodel.LabelSet{
clientmodel.MetricNameLabel: "targets_healthy_scrape_latency_ms",
"percentile": "0.010000",
}
fingerprints, err := p.GetFingerprintsForLabelMatchers(labelMatchersFromLabelSet(labelSet))
if err != nil {
t.Errorf("could not get labels: %s", err)
}
if len(fingerprints) != 1 {
t.Errorf("did not get a single metric as is expected, got %s", fingerprints)
}
}
// Test Definitions Below
var testLevelDBGetFingerprintsForLabelSetUsesAndForLabelMatching = buildLevelDBTestPersistence("get_fingerprints_for_labelset_uses_and_for_label_matching", GetFingerprintsForLabelSetUsesAndForLabelMatchingTests)
func TestLevelDBGetFingerprintsForLabelSetUsesAndForLabelMatching(t *testing.T) {
testLevelDBGetFingerprintsForLabelSetUsesAndForLabelMatching(t)
}
func BenchmarkLevelDBGetFingerprintsForLabelSetUsesAndForLabelMatching(b *testing.B) {
for i := 0; i < b.N; i++ {
testLevelDBGetFingerprintsForLabelSetUsesAndForLabelMatching(b)
}
}
var testMemoryGetFingerprintsForLabelSetUsesAndForLabelMatching = buildMemoryTestPersistence(GetFingerprintsForLabelSetUsesAndForLabelMatchingTests)
func TestMemoryGetFingerprintsForLabelSetUsesAndForLabelMatching(t *testing.T) {
testMemoryGetFingerprintsForLabelSetUsesAndForLabelMatching(t)
}
func BenchmarkMemoryGetFingerprintsForLabelSetUsesAndLabelMatching(b *testing.B) {
for i := 0; i < b.N; i++ {
testMemoryGetFingerprintsForLabelSetUsesAndForLabelMatching(b)
}
}


@@ -1,954 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"testing"
"time"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
"github.com/prometheus/prometheus/utility/test"
)
func GetValueAtTimeTests(persistenceMaker func() (metric.ViewablePersistence, test.Closer), t testing.TB) {
type value struct {
year int
month time.Month
day int
hour int
value clientmodel.SampleValue
}
type input struct {
year int
month time.Month
day int
hour int
}
type output []clientmodel.SampleValue
type behavior struct {
name string
input input
output output
}
var contexts = []struct {
name string
values []value
behaviors []behavior
}{
{
name: "no values",
values: []value{},
behaviors: []behavior{
{
name: "random target",
input: input{
year: 1984,
month: 3,
day: 30,
hour: 0,
},
},
},
},
{
name: "singleton",
values: []value{
{
year: 1984,
month: 3,
day: 30,
hour: 0,
value: 0,
},
},
behaviors: []behavior{
{
name: "exact",
input: input{
year: 1984,
month: 3,
day: 30,
hour: 0,
},
output: output{
0,
},
},
{
name: "before",
input: input{
year: 1984,
month: 3,
day: 29,
hour: 0,
},
output: output{
0,
},
},
{
name: "after",
input: input{
year: 1984,
month: 3,
day: 31,
hour: 0,
},
output: output{
0,
},
},
},
},
{
name: "double",
values: []value{
{
year: 1984,
month: 3,
day: 30,
hour: 0,
value: 0,
},
{
year: 1985,
month: 3,
day: 30,
hour: 0,
value: 1,
},
},
behaviors: []behavior{
{
name: "exact first",
input: input{
year: 1984,
month: 3,
day: 30,
hour: 0,
},
output: output{
0,
},
},
{
name: "exact second",
input: input{
year: 1985,
month: 3,
day: 30,
hour: 0,
},
output: output{
1,
},
},
{
name: "before first",
input: input{
year: 1983,
month: 9,
day: 29,
hour: 12,
},
output: output{
0,
},
},
{
name: "after second",
input: input{
year: 1985,
month: 9,
day: 28,
hour: 12,
},
output: output{
1,
},
},
{
name: "middle",
input: input{
year: 1984,
month: 9,
day: 28,
hour: 12,
},
output: output{
0,
1,
},
},
},
},
{
name: "triple",
values: []value{
{
year: 1984,
month: 3,
day: 30,
hour: 0,
value: 0,
},
{
year: 1985,
month: 3,
day: 30,
hour: 0,
value: 1,
},
{
year: 1986,
month: 3,
day: 30,
hour: 0,
value: 2,
},
},
behaviors: []behavior{
{
name: "exact first",
input: input{
year: 1984,
month: 3,
day: 30,
hour: 0,
},
output: output{
0,
},
},
{
name: "exact second",
input: input{
year: 1985,
month: 3,
day: 30,
hour: 0,
},
output: output{
1,
},
},
{
name: "exact third",
input: input{
year: 1986,
month: 3,
day: 30,
hour: 0,
},
output: output{
2,
},
},
{
name: "before first",
input: input{
year: 1983,
month: 9,
day: 29,
hour: 12,
},
output: output{
0,
},
},
{
name: "after third",
input: input{
year: 1986,
month: 9,
day: 28,
hour: 12,
},
output: output{
2,
},
},
{
name: "first middle",
input: input{
year: 1984,
month: 9,
day: 28,
hour: 12,
},
output: output{
0,
1,
},
},
{
name: "second middle",
input: input{
year: 1985,
month: 9,
day: 28,
hour: 12,
},
output: output{
1,
2,
},
},
},
},
}
for i, context := range contexts {
// Wrapping in function to enable garbage collection of resources.
func() {
p, closer := persistenceMaker()
defer closer.Close()
defer p.Close()
m := clientmodel.Metric{
clientmodel.MetricNameLabel: "age_in_years",
}
for _, value := range context.values {
testAppendSamples(p, &clientmodel.Sample{
Value: clientmodel.SampleValue(value.value),
Timestamp: clientmodel.TimestampFromTime(time.Date(value.year, value.month, value.day, value.hour, 0, 0, 0, time.UTC)),
Metric: m,
}, t)
}
for j, behavior := range context.behaviors {
input := behavior.input
time := clientmodel.TimestampFromTime(time.Date(input.year, input.month, input.day, input.hour, 0, 0, 0, time.UTC))
fingerprint := &clientmodel.Fingerprint{}
fingerprint.LoadFromMetric(m)
actual := p.GetValueAtTime(fingerprint, time)
if len(behavior.output) != len(actual) {
t.Fatalf("%d.%d(%s.%s). Expected %d samples but got: %v\n", i, j, context.name, behavior.name, len(behavior.output), actual)
}
for k, samplePair := range actual {
if samplePair.Value != behavior.output[k] {
t.Fatalf("%d.%d.%d(%s.%s). Expected %s but got %s\n", i, j, k, context.name, behavior.name, behavior.output[k], samplePair)
}
}
}
}()
}
}
func GetRangeValuesTests(persistenceMaker func() (metric.ViewablePersistence, test.Closer), onlyBoundaries bool, t testing.TB) {
type value struct {
year int
month time.Month
day int
hour int
value clientmodel.SampleValue
}
type input struct {
openYear int
openMonth time.Month
openDay int
openHour int
endYear int
endMonth time.Month
endDay int
endHour int
}
type output struct {
year int
month time.Month
day int
hour int
value clientmodel.SampleValue
}
type behavior struct {
name string
input input
output []output
}
var contexts = []struct {
name string
values []value
behaviors []behavior
}{
{
name: "no values",
values: []value{},
behaviors: []behavior{
{
name: "non-existent interval",
input: input{
openYear: 1984,
openMonth: 3,
openDay: 30,
openHour: 0,
endYear: 1985,
endMonth: 3,
endDay: 30,
endHour: 0,
},
},
},
},
{
name: "singleton value",
values: []value{
{
year: 1984,
month: 3,
day: 30,
hour: 0,
value: 0,
},
},
behaviors: []behavior{
{
name: "start on first value",
input: input{
openYear: 1984,
openMonth: 3,
openDay: 30,
openHour: 0,
endYear: 1985,
endMonth: 3,
endDay: 30,
endHour: 0,
},
output: []output{
{
year: 1984,
month: 3,
day: 30,
hour: 0,
value: 0,
},
},
},
{
name: "end on first value",
input: input{
openYear: 1983,
openMonth: 3,
openDay: 30,
openHour: 0,
endYear: 1984,
endMonth: 3,
endDay: 30,
endHour: 0,
},
output: []output{
{
year: 1984,
month: 3,
day: 30,
hour: 0,
value: 0,
},
},
},
{
name: "overlap on first value",
input: input{
openYear: 1983,
openMonth: 3,
openDay: 30,
openHour: 0,
endYear: 1985,
endMonth: 3,
endDay: 30,
endHour: 0,
},
output: []output{
{
year: 1984,
month: 3,
day: 30,
hour: 0,
value: 0,
},
},
},
},
},
{
name: "two values",
values: []value{
{
year: 1984,
month: 3,
day: 30,
hour: 0,
value: 0,
},
{
year: 1985,
month: 3,
day: 30,
hour: 0,
value: 1,
},
},
behaviors: []behavior{
{
name: "start on first value",
input: input{
openYear: 1984,
openMonth: 3,
openDay: 30,
openHour: 0,
endYear: 1985,
endMonth: 3,
endDay: 30,
endHour: 0,
},
output: []output{
{
year: 1984,
month: 3,
day: 30,
hour: 0,
value: 0,
},
{
year: 1985,
month: 3,
day: 30,
hour: 0,
value: 1,
},
},
},
{
name: "start on second value",
input: input{
openYear: 1985,
openMonth: 3,
openDay: 30,
openHour: 0,
endYear: 1986,
endMonth: 3,
endDay: 30,
endHour: 0,
},
output: []output{
{
year: 1985,
month: 3,
day: 30,
hour: 0,
value: 1,
},
},
},
{
name: "end on first value",
input: input{
openYear: 1983,
openMonth: 3,
openDay: 30,
openHour: 0,
endYear: 1984,
endMonth: 3,
endDay: 30,
endHour: 0,
},
output: []output{
{
year: 1984,
month: 3,
day: 30,
hour: 0,
value: 0,
},
},
},
{
name: "end on second value",
input: input{
openYear: 1985,
openMonth: 1,
openDay: 1,
openHour: 0,
endYear: 1985,
endMonth: 3,
endDay: 30,
endHour: 0,
},
output: []output{
{
year: 1985,
month: 3,
day: 30,
hour: 0,
value: 1,
},
},
},
{
name: "overlap on values",
input: input{
openYear: 1983,
openMonth: 3,
openDay: 30,
openHour: 0,
endYear: 1986,
endMonth: 3,
endDay: 30,
endHour: 0,
},
output: []output{
{
year: 1984,
month: 3,
day: 30,
hour: 0,
value: 0,
},
{
year: 1985,
month: 3,
day: 30,
hour: 0,
value: 1,
},
},
},
},
},
{
name: "three values",
values: []value{
{
year: 1984,
month: 3,
day: 30,
hour: 0,
value: 0,
},
{
year: 1985,
month: 3,
day: 30,
hour: 0,
value: 1,
},
{
year: 1986,
month: 3,
day: 30,
hour: 0,
value: 2,
},
},
behaviors: []behavior{
{
name: "start on first value",
input: input{
openYear: 1984,
openMonth: 3,
openDay: 30,
openHour: 0,
endYear: 1985,
endMonth: 3,
endDay: 30,
endHour: 0,
},
output: []output{
{
year: 1984,
month: 3,
day: 30,
hour: 0,
value: 0,
},
{
year: 1985,
month: 3,
day: 30,
hour: 0,
value: 1,
},
},
},
{
name: "start on second value",
input: input{
openYear: 1985,
openMonth: 3,
openDay: 30,
openHour: 0,
endYear: 1986,
endMonth: 3,
endDay: 30,
endHour: 0,
},
output: []output{
{
year: 1985,
month: 3,
day: 30,
hour: 0,
value: 1,
},
{
year: 1986,
month: 3,
day: 30,
hour: 0,
value: 2,
},
},
},
{
name: "end on first value",
input: input{
openYear: 1983,
openMonth: 3,
openDay: 30,
openHour: 0,
endYear: 1984,
endMonth: 3,
endDay: 30,
endHour: 0,
},
output: []output{
{
year: 1984,
month: 3,
day: 30,
hour: 0,
value: 0,
},
},
},
{
name: "end on second value",
input: input{
openYear: 1985,
openMonth: 1,
openDay: 1,
openHour: 0,
endYear: 1985,
endMonth: 3,
endDay: 30,
endHour: 0,
},
output: []output{
{
year: 1985,
month: 3,
day: 30,
hour: 0,
value: 1,
},
},
},
{
name: "overlap on values",
input: input{
openYear: 1983,
openMonth: 3,
openDay: 30,
openHour: 0,
endYear: 1986,
endMonth: 3,
endDay: 30,
endHour: 0,
},
output: []output{
{
year: 1984,
month: 3,
day: 30,
hour: 0,
value: 0,
},
{
year: 1985,
month: 3,
day: 30,
hour: 0,
value: 1,
},
{
year: 1986,
month: 3,
day: 30,
hour: 0,
value: 2,
},
},
},
},
},
}
for i, context := range contexts {
// Wrapping in function to enable garbage collection of resources.
func() {
p, closer := persistenceMaker()
defer closer.Close()
defer p.Close()
m := clientmodel.Metric{
clientmodel.MetricNameLabel: "age_in_years",
}
for _, value := range context.values {
testAppendSamples(p, &clientmodel.Sample{
Value: clientmodel.SampleValue(value.value),
Timestamp: clientmodel.TimestampFromTime(time.Date(value.year, value.month, value.day, value.hour, 0, 0, 0, time.UTC)),
Metric: m,
}, t)
}
for j, behavior := range context.behaviors {
input := behavior.input
open := clientmodel.TimestampFromTime(time.Date(input.openYear, input.openMonth, input.openDay, input.openHour, 0, 0, 0, time.UTC))
end := clientmodel.TimestampFromTime(time.Date(input.endYear, input.endMonth, input.endDay, input.endHour, 0, 0, 0, time.UTC))
in := metric.Interval{
OldestInclusive: open,
NewestInclusive: end,
}
actualValues := metric.Values{}
expectedValues := []output{}
fp := &clientmodel.Fingerprint{}
fp.LoadFromMetric(m)
if onlyBoundaries {
actualValues = p.GetBoundaryValues(fp, in)
l := len(behavior.output)
if l == 1 {
expectedValues = behavior.output[0:1]
}
if l > 1 {
expectedValues = append(behavior.output[0:1], behavior.output[l-1])
}
} else {
actualValues = p.GetRangeValues(fp, in)
expectedValues = behavior.output
}
if actualValues == nil && len(expectedValues) != 0 {
t.Fatalf("%d.%d(%s). Expected %v but got: %v\n", i, j, behavior.name, expectedValues, actualValues)
}
if expectedValues == nil {
if actualValues != nil {
t.Fatalf("%d.%d(%s). Expected nil values but got: %s\n", i, j, behavior.name, actualValues)
}
} else {
if len(expectedValues) != len(actualValues) {
t.Fatalf("%d.%d(%s). Expected length %d but got: %d\n", i, j, behavior.name, len(expectedValues), len(actualValues))
}
for k, actual := range actualValues {
expected := expectedValues[k]
if actual.Value != clientmodel.SampleValue(expected.value) {
t.Fatalf("%d.%d.%d(%s). Expected %v but got: %v\n", i, j, k, behavior.name, expected.value, actual.Value)
}
if actual.Timestamp.Time().Year() != expected.year {
t.Fatalf("%d.%d.%d(%s). Expected %d but got: %d\n", i, j, k, behavior.name, expected.year, actual.Timestamp.Time().Year())
}
if actual.Timestamp.Time().Month() != expected.month {
t.Fatalf("%d.%d.%d(%s). Expected %d but got: %d\n", i, j, k, behavior.name, expected.month, actual.Timestamp.Time().Month())
}
// XXX: Find the problem here.
// Mismatches have occurred here for a long time in the LevelDB
// case, but not in the in-memory one.
//
// if actual.Timestamp.Day() != expected.day {
// t.Fatalf("%d.%d.%d(%s). Expected %d but got: %d\n", i, j, k, behavior.name, expected.day, actual.Timestamp.Day())
// }
// if actual.Timestamp.Hour() != expected.hour {
// t.Fatalf("%d.%d.%d(%s). Expected %d but got: %d\n", i, j, k, behavior.name, expected.hour, actual.Timestamp.Hour())
// }
}
}
}
}()
}
}
// Test Definitions Follow
func testMemoryGetValueAtTime(t testing.TB) {
persistenceMaker := func() (metric.ViewablePersistence, test.Closer) {
return NewMemorySeriesStorage(MemorySeriesOptions{}), test.NilCloser
}
GetValueAtTimeTests(persistenceMaker, t)
}
func TestMemoryGetValueAtTime(t *testing.T) {
testMemoryGetValueAtTime(t)
}
func BenchmarkMemoryGetValueAtTime(b *testing.B) {
for i := 0; i < b.N; i++ {
testMemoryGetValueAtTime(b)
}
}
func TestMemoryGetBoundaryValues(t *testing.T) {
testMemoryGetBoundaryValues(t)
}
func BenchmarkMemoryGetBoundaryValues(b *testing.B) {
for i := 0; i < b.N; i++ {
testMemoryGetBoundaryValues(b)
}
}
func testMemoryGetRangeValues(t testing.TB) {
persistenceMaker := func() (metric.ViewablePersistence, test.Closer) {
return NewMemorySeriesStorage(MemorySeriesOptions{}), test.NilCloser
}
GetRangeValuesTests(persistenceMaker, false, t)
}
func testMemoryGetBoundaryValues(t testing.TB) {
persistenceMaker := func() (metric.ViewablePersistence, test.Closer) {
return NewMemorySeriesStorage(MemorySeriesOptions{}), test.NilCloser
}
GetRangeValuesTests(persistenceMaker, true, t)
}
func TestMemoryGetRangeValues(t *testing.T) {
testMemoryGetRangeValues(t)
}
func BenchmarkMemoryGetRangeValues(b *testing.B) {
for i := 0; i < b.N; i++ {
testMemoryGetRangeValues(b)
}
}


@@ -1,101 +0,0 @@
package tiered
import (
"math/rand"
"testing"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
)
const numTestValues = 5000
func TestValuesMarshalAndUnmarshal(t *testing.T) {
values := randomValues(numTestValues)
marshalled := marshalValues(values, nil)
unmarshalled := unmarshalValues(marshalled, nil)
for i, expected := range values {
actual := unmarshalled[i]
if !actual.Equal(&expected) {
t.Fatalf("%d. got: %v, expected: %v", i, actual, expected)
}
}
}
func randomValues(numSamples int) metric.Values {
v := make(metric.Values, 0, numSamples)
for i := 0; i < numSamples; i++ {
v = append(v, metric.SamplePair{
Timestamp: clientmodel.Timestamp(rand.Int63()),
Value: clientmodel.SampleValue(rand.NormFloat64()),
})
}
return v
}
func benchmarkMarshal(b *testing.B, n int) {
v := randomValues(n)
b.ResetTimer()
// TODO: Reuse buffer to compare performance.
// - Delta is -30 percent time overhead.
for i := 0; i < b.N; i++ {
marshalValues(v, nil)
}
}
func BenchmarkMarshal1(b *testing.B) {
benchmarkMarshal(b, 1)
}
func BenchmarkMarshal10(b *testing.B) {
benchmarkMarshal(b, 10)
}
func BenchmarkMarshal100(b *testing.B) {
benchmarkMarshal(b, 100)
}
func BenchmarkMarshal1000(b *testing.B) {
benchmarkMarshal(b, 1000)
}
func BenchmarkMarshal10000(b *testing.B) {
benchmarkMarshal(b, 10000)
}
func benchmarkUnmarshal(b *testing.B, n int) {
v := randomValues(numTestValues)
marshalled := marshalValues(v, nil)
b.ResetTimer()
// TODO: Reuse buffer to compare performance.
// - Delta is -15 percent time overhead.
for i := 0; i < b.N; i++ {
unmarshalValues(marshalled, nil)
}
}
func BenchmarkUnmarshal1(b *testing.B) {
benchmarkUnmarshal(b, 1)
}
func BenchmarkUnmarshal10(b *testing.B) {
benchmarkUnmarshal(b, 10)
}
func BenchmarkUnmarshal100(b *testing.B) {
benchmarkUnmarshal(b, 100)
}
func BenchmarkUnmarshal1000(b *testing.B) {
benchmarkUnmarshal(b, 1000)
}
func BenchmarkUnmarshal10000(b *testing.B) {
benchmarkUnmarshal(b, 10000)
}


@@ -1,141 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"fmt"
"code.google.com/p/goprotobuf/proto"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/coding/indexable"
"github.com/prometheus/prometheus/storage/metric"
dto "github.com/prometheus/prometheus/model/generated"
)
// SampleKey models the business logic around the data-transfer object
// SampleKey.
type SampleKey struct {
Fingerprint *clientmodel.Fingerprint
FirstTimestamp clientmodel.Timestamp
LastTimestamp clientmodel.Timestamp
SampleCount uint32
}
// Constrain clamps this SampleKey to the keyspace bounded by the provided
// first and last keys and returns whether the key was modified.
func (s *SampleKey) Constrain(first, last *SampleKey) bool {
switch {
case s.Before(first.Fingerprint, first.FirstTimestamp):
*s = *first
return true
case last.Before(s.Fingerprint, s.FirstTimestamp):
*s = *last
return true
default:
return false
}
}
// Equal returns true if this SampleKey and o have equal fingerprints,
// timestamps, and sample counts.
func (s *SampleKey) Equal(o *SampleKey) bool {
if s == o {
return true
}
if !s.Fingerprint.Equal(o.Fingerprint) {
return false
}
if !s.FirstTimestamp.Equal(o.FirstTimestamp) {
return false
}
if !s.LastTimestamp.Equal(o.LastTimestamp) {
return false
}
return s.SampleCount == o.SampleCount
}
// MayContain indicates whether this SampleKey could potentially contain a
// value at the provided time. Even if it returns true, a satisfactory value
// may not actually exist.
func (s *SampleKey) MayContain(t clientmodel.Timestamp) bool {
switch {
case t.Before(s.FirstTimestamp):
return false
case t.After(s.LastTimestamp):
return false
default:
return true
}
}
// Before returns true if the Fingerprint of this SampleKey is less than fp and
// false if it is greater. If both fingerprints are equal, the FirstTimestamp of
// this SampleKey is checked in the same way against t. If the timestamps are
// equal, the LastTimestamp of this SampleKey is checked against t (and false is
// returned if they are equal again).
func (s *SampleKey) Before(fp *clientmodel.Fingerprint, t clientmodel.Timestamp) bool {
if s.Fingerprint.Less(fp) {
return true
}
if !s.Fingerprint.Equal(fp) {
return false
}
if s.FirstTimestamp.Before(t) {
return true
}
return s.LastTimestamp.Before(t)
}
// Dump converts this SampleKey into a DTO for use in serialization purposes.
func (s *SampleKey) Dump(d *dto.SampleKey) {
d.Reset()
fp := &dto.Fingerprint{}
dumpFingerprint(fp, s.Fingerprint)
d.Fingerprint = fp
d.Timestamp = indexable.EncodeTime(s.FirstTimestamp)
d.LastTimestamp = proto.Int64(s.LastTimestamp.Unix())
d.SampleCount = proto.Uint32(s.SampleCount)
}
func (s *SampleKey) String() string {
return fmt.Sprintf("SampleKey for %s at %s to %s with %d values.", s.Fingerprint, s.FirstTimestamp, s.LastTimestamp, s.SampleCount)
}
// Load deserializes this SampleKey from a DTO.
func (s *SampleKey) Load(d *dto.SampleKey) {
f := &clientmodel.Fingerprint{}
loadFingerprint(f, d.GetFingerprint())
s.Fingerprint = f
s.FirstTimestamp = indexable.DecodeTime(d.Timestamp)
s.LastTimestamp = clientmodel.TimestampFromUnix(d.GetLastTimestamp())
s.SampleCount = d.GetSampleCount()
}
// buildSampleKey returns the SampleKey for the given Fingerprint and Values.
func buildSampleKey(f *clientmodel.Fingerprint, v metric.Values) *SampleKey {
return &SampleKey{
Fingerprint: f,
FirstTimestamp: v[0].Timestamp,
LastTimestamp: v[len(v)-1].Timestamp,
SampleCount: uint32(len(v)),
}
}
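// Added minimal usage sketch of the helpers above; the fingerprint and
// timestamps are hypothetical, and this function is not part of the
// original source.
func exampleSampleKeyChecks() {
	fp := &clientmodel.Fingerprint{}
	fp.LoadFromString("00000000000000001111-a-4-a")
	key := &SampleKey{
		Fingerprint:    fp,
		FirstTimestamp: clientmodel.TimestampFromUnix(1000),
		LastTimestamp:  clientmodel.TimestampFromUnix(1009),
		SampleCount:    10,
	}
	// MayContain brackets the query time against [FirstTimestamp, LastTimestamp].
	key.MayContain(clientmodel.TimestampFromUnix(1005)) // true
	key.MayContain(clientmodel.TimestampFromUnix(1010)) // false
	// Before orders by fingerprint first, then by the timestamps.
	key.Before(fp, clientmodel.TimestampFromUnix(1001)) // true: FirstTimestamp is older
}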


@@ -1,632 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"fmt"
"math"
"math/rand"
"sort"
"testing"
"testing/quick"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/coding/indexable"
"github.com/prometheus/prometheus/storage/metric"
"github.com/prometheus/prometheus/utility/test"
dto "github.com/prometheus/prometheus/model/generated"
)
const stochasticMaximumVariance = 8
func BasicLifecycleTests(p metric.Persistence, t testing.TB) {
if p == nil {
t.Errorf("Received nil Metric Persistence.\n")
return
}
}
func ReadEmptyTests(p metric.Persistence, t testing.TB) {
hasLabelPair := func(x int) (success bool) {
fingerprints, err := p.GetFingerprintsForLabelMatchers(metric.LabelMatchers{{
Type: metric.Equal,
Name: clientmodel.LabelName(string(x)),
Value: clientmodel.LabelValue(string(x)),
}})
if err != nil {
t.Error(err)
return
}
success = len(fingerprints) == 0
if !success {
t.Errorf("unexpected fingerprint length %d, got %d", 0, len(fingerprints))
}
return
}
err := quick.Check(hasLabelPair, nil)
if err != nil {
t.Error(err)
return
}
hasLabelName := func(x int) (success bool) {
labelName := clientmodel.LabelName(string(x))
values, err := p.GetLabelValuesForLabelName(labelName)
if err != nil {
t.Error(err)
return
}
success = len(values) == 0
if !success {
t.Errorf("unexpected values length %d, got %d", 0, len(values))
}
return
}
err = quick.Check(hasLabelName, nil)
if err != nil {
t.Error(err)
return
}
}
func AppendSampleAsPureSparseAppendTests(p metric.Persistence, t testing.TB) {
appendSample := func(x int) (success bool) {
v := clientmodel.SampleValue(x)
ts := clientmodel.TimestampFromUnix(int64(x))
labelName := clientmodel.LabelName(x)
labelValue := clientmodel.LabelValue(x)
l := clientmodel.Metric{labelName: labelValue}
sample := &clientmodel.Sample{
Value: v,
Timestamp: ts,
Metric: l,
}
err := p.AppendSamples(clientmodel.Samples{sample})
success = err == nil
if !success {
t.Error(err)
}
return
}
if err := quick.Check(appendSample, nil); err != nil {
t.Error(err)
}
}
func AppendSampleAsSparseAppendWithReadsTests(p metric.Persistence, t testing.TB) {
appendSample := func(x int) (success bool) {
v := clientmodel.SampleValue(x)
ts := clientmodel.TimestampFromUnix(int64(x))
labelName := clientmodel.LabelName(x)
labelValue := clientmodel.LabelValue(x)
l := clientmodel.Metric{labelName: labelValue}
sample := &clientmodel.Sample{
Value: v,
Timestamp: ts,
Metric: l,
}
err := p.AppendSamples(clientmodel.Samples{sample})
if err != nil {
t.Error(err)
return
}
values, err := p.GetLabelValuesForLabelName(labelName)
if err != nil {
t.Error(err)
return
}
if len(values) != 1 {
t.Errorf("expected label values count of %d, got %d", 1, len(values))
return
}
fingerprints, err := p.GetFingerprintsForLabelMatchers(metric.LabelMatchers{{
Type: metric.Equal,
Name: labelName,
Value: labelValue,
}})
if err != nil {
t.Error(err)
return
}
if len(fingerprints) != 1 {
t.Errorf("expected fingerprint count of %d, got %d", 1, len(fingerprints))
return
}
return true
}
if err := quick.Check(appendSample, nil); err != nil {
t.Error(err)
}
}
func AppendSampleAsPureSingleEntityAppendTests(p metric.Persistence, t testing.TB) {
appendSample := func(x int) bool {
sample := &clientmodel.Sample{
Value: clientmodel.SampleValue(x),
Timestamp: clientmodel.TimestampFromUnix(int64(x)),
Metric: clientmodel.Metric{clientmodel.MetricNameLabel: "my_metric"},
}
err := p.AppendSamples(clientmodel.Samples{sample})
return err == nil
}
if err := quick.Check(appendSample, nil); err != nil {
t.Error(err)
}
}
func levelDBGetRangeValues(l *LevelDBPersistence, fp *clientmodel.Fingerprint, i metric.Interval) (samples metric.Values, err error) {
fpDto := &dto.Fingerprint{}
dumpFingerprint(fpDto, fp)
k := &dto.SampleKey{
Fingerprint: fpDto,
Timestamp: indexable.EncodeTime(i.OldestInclusive),
}
iterator, err := l.MetricSamples.NewIterator(true)
if err != nil {
panic(err)
}
defer iterator.Close()
for valid := iterator.Seek(k); valid; valid = iterator.Next() {
retrievedKey, err := extractSampleKey(iterator)
if err != nil {
return samples, err
}
if retrievedKey.FirstTimestamp.After(i.NewestInclusive) {
break
}
if !retrievedKey.Fingerprint.Equal(fp) {
break
}
retrievedValues := unmarshalValues(iterator.RawValue(), nil)
samples = append(samples, retrievedValues...)
}
return
}
type timeslice []clientmodel.Timestamp
func (t timeslice) Len() int {
return len(t)
}
func (t timeslice) Swap(i, j int) {
t[i], t[j] = t[j], t[i]
}
func (t timeslice) Less(i, j int) bool {
return t[i].Before(t[j])
}
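// Added illustration: because timeslice satisfies sort.Interface, the
// stochastic test below can order random timestamps with the standard
// library. The values here are hypothetical.
func exampleSortTimeslice() {
	ts := timeslice{
		clientmodel.TimestampFromUnix(30),
		clientmodel.TimestampFromUnix(10),
		clientmodel.TimestampFromUnix(20),
	}
	sort.Sort(ts) // ts now holds 10, 20, 30 in that order
}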
func StochasticTests(persistenceMaker func() (metric.Persistence, test.Closer), t testing.TB) {
stochastic := func(x int) (success bool) {
p, closer := persistenceMaker()
defer closer.Close()
defer p.Close()
seed := rand.NewSource(int64(x))
random := rand.New(seed)
numberOfMetrics := random.Intn(stochasticMaximumVariance) + 1
numberOfSharedLabels := random.Intn(stochasticMaximumVariance)
numberOfUnsharedLabels := random.Intn(stochasticMaximumVariance)
numberOfSamples := random.Intn(stochasticMaximumVariance) + 2
numberOfRangeScans := random.Intn(stochasticMaximumVariance)
metricTimestamps := map[int]map[int64]bool{}
metricEarliestSample := map[int]int64{}
metricNewestSample := map[int]int64{}
for metricIndex := 0; metricIndex < numberOfMetrics; metricIndex++ {
sample := &clientmodel.Sample{
Metric: clientmodel.Metric{},
}
v := clientmodel.LabelValue(fmt.Sprintf("metric_index_%d", metricIndex))
sample.Metric[clientmodel.MetricNameLabel] = v
for sharedLabelIndex := 0; sharedLabelIndex < numberOfSharedLabels; sharedLabelIndex++ {
l := clientmodel.LabelName(fmt.Sprintf("shared_label_%d", sharedLabelIndex))
v := clientmodel.LabelValue(fmt.Sprintf("label_%d", sharedLabelIndex))
sample.Metric[l] = v
}
for unsharedLabelIndex := 0; unsharedLabelIndex < numberOfUnsharedLabels; unsharedLabelIndex++ {
l := clientmodel.LabelName(fmt.Sprintf("metric_index_%d_private_label_%d", metricIndex, unsharedLabelIndex))
v := clientmodel.LabelValue(fmt.Sprintf("private_label_%d", unsharedLabelIndex))
sample.Metric[l] = v
}
timestamps := map[int64]bool{}
metricTimestamps[metricIndex] = timestamps
var newestSample int64 = math.MinInt64
var oldestSample int64 = math.MaxInt64
var nextTimestamp func() int64
nextTimestamp = func() int64 {
var candidate int64
candidate = random.Int63n(math.MaxInt32 - 1)
if _, has := timestamps[candidate]; has {
// WART: recurse to retry on a timestamp collision.
candidate = nextTimestamp()
}
timestamps[candidate] = true
if candidate < oldestSample {
oldestSample = candidate
}
if candidate > newestSample {
newestSample = candidate
}
return candidate
}
// BUG(matt): Invariant of the in-memory database assumes this.
sortedTimestamps := timeslice{}
for sampleIndex := 0; sampleIndex < numberOfSamples; sampleIndex++ {
sortedTimestamps = append(sortedTimestamps, clientmodel.TimestampFromUnix(nextTimestamp()))
}
sort.Sort(sortedTimestamps)
for sampleIndex := 0; sampleIndex < numberOfSamples; sampleIndex++ {
sample.Timestamp = sortedTimestamps[sampleIndex]
sample.Value = clientmodel.SampleValue(sampleIndex)
err := p.AppendSamples(clientmodel.Samples{sample})
if err != nil {
t.Error(err)
return
}
}
metricEarliestSample[metricIndex] = oldestSample
metricNewestSample[metricIndex] = newestSample
for sharedLabelIndex := 0; sharedLabelIndex < numberOfSharedLabels; sharedLabelIndex++ {
matchers := metric.LabelMatchers{{
Type: metric.Equal,
Name: clientmodel.LabelName(fmt.Sprintf("shared_label_%d", sharedLabelIndex)),
Value: clientmodel.LabelValue(fmt.Sprintf("label_%d", sharedLabelIndex)),
}}
fingerprints, err := p.GetFingerprintsForLabelMatchers(matchers)
if err != nil {
t.Error(err)
return
}
if len(fingerprints) == 0 {
t.Errorf("expected fingerprint count of %d, got %d", 0, len(fingerprints))
return
}
}
}
for metricIndex := 0; metricIndex < numberOfMetrics; metricIndex++ {
for unsharedLabelIndex := 0; unsharedLabelIndex < numberOfUnsharedLabels; unsharedLabelIndex++ {
labelName := clientmodel.LabelName(fmt.Sprintf("metric_index_%d_private_label_%d", metricIndex, unsharedLabelIndex))
labelValue := clientmodel.LabelValue(fmt.Sprintf("private_label_%d", unsharedLabelIndex))
matchers := metric.LabelMatchers{{
Type: metric.Equal,
Name: labelName,
Value: labelValue,
}}
fingerprints, err := p.GetFingerprintsForLabelMatchers(matchers)
if err != nil {
t.Error(err)
return
}
if len(fingerprints) != 1 {
t.Errorf("expected fingerprint count of %d, got %d", 1, len(fingerprints))
return
}
}
m := clientmodel.Metric{}
m[clientmodel.MetricNameLabel] = clientmodel.LabelValue(fmt.Sprintf("metric_index_%d", metricIndex))
for i := 0; i < numberOfSharedLabels; i++ {
l := clientmodel.LabelName(fmt.Sprintf("shared_label_%d", i))
v := clientmodel.LabelValue(fmt.Sprintf("label_%d", i))
m[l] = v
}
for i := 0; i < numberOfUnsharedLabels; i++ {
l := clientmodel.LabelName(fmt.Sprintf("metric_index_%d_private_label_%d", metricIndex, i))
v := clientmodel.LabelValue(fmt.Sprintf("private_label_%d", i))
m[l] = v
}
for i := 0; i < numberOfRangeScans; i++ {
timestamps := metricTimestamps[metricIndex]
var first int64
var second int64
for {
firstCandidate := random.Int63n(int64(len(timestamps)))
secondCandidate := random.Int63n(int64(len(timestamps)))
smallest := int64(-1)
largest := int64(-1)
if firstCandidate == secondCandidate {
continue
} else if firstCandidate > secondCandidate {
largest = firstCandidate
smallest = secondCandidate
} else {
largest = secondCandidate
smallest = firstCandidate
}
j := int64(0)
for i := range timestamps {
if j == smallest {
first = i
} else if j == largest {
second = i
break
}
j++
}
break
}
begin := first
end := second
if second < first {
begin, end = second, first
}
interval := metric.Interval{
OldestInclusive: clientmodel.TimestampFromUnix(begin),
NewestInclusive: clientmodel.TimestampFromUnix(end),
}
samples := metric.Values{}
fp := &clientmodel.Fingerprint{}
fp.LoadFromMetric(m)
switch persistence := p.(type) {
case metric.View:
samples = persistence.GetRangeValues(fp, interval)
if len(samples) < 2 {
t.Fatalf("expected sample count greater than %d, got %d", 2, len(samples))
}
case *LevelDBPersistence:
var err error
samples, err = levelDBGetRangeValues(persistence, fp, interval)
if err != nil {
t.Fatal(err)
}
if len(samples) < 2 {
t.Fatalf("expected sample count greater than %d, got %d", 2, len(samples))
}
default:
t.Error("Unexpected type of metric.Persistence.")
}
}
}
return true
}
if err := quick.Check(stochastic, nil); err != nil {
t.Error(err)
}
}
// Test Definitions Follow
var testLevelDBBasicLifecycle = buildLevelDBTestPersistence("basic_lifecycle", BasicLifecycleTests)
func TestLevelDBBasicLifecycle(t *testing.T) {
testLevelDBBasicLifecycle(t)
}
func BenchmarkLevelDBBasicLifecycle(b *testing.B) {
for i := 0; i < b.N; i++ {
testLevelDBBasicLifecycle(b)
}
}
var testLevelDBReadEmpty = buildLevelDBTestPersistence("read_empty", ReadEmptyTests)
func TestLevelDBReadEmpty(t *testing.T) {
testLevelDBReadEmpty(t)
}
func BenchmarkLevelDBReadEmpty(b *testing.B) {
for i := 0; i < b.N; i++ {
testLevelDBReadEmpty(b)
}
}
var testLevelDBAppendSampleAsPureSparseAppend = buildLevelDBTestPersistence("append_sample_as_pure_sparse_append", AppendSampleAsPureSparseAppendTests)
func TestLevelDBAppendSampleAsPureSparseAppend(t *testing.T) {
testLevelDBAppendSampleAsPureSparseAppend(t)
}
func BenchmarkLevelDBAppendSampleAsPureSparseAppend(b *testing.B) {
for i := 0; i < b.N; i++ {
testLevelDBAppendSampleAsPureSparseAppend(b)
}
}
var testLevelDBAppendSampleAsSparseAppendWithReads = buildLevelDBTestPersistence("append_sample_as_sparse_append_with_reads", AppendSampleAsSparseAppendWithReadsTests)
func TestLevelDBAppendSampleAsSparseAppendWithReads(t *testing.T) {
testLevelDBAppendSampleAsSparseAppendWithReads(t)
}
func BenchmarkLevelDBAppendSampleAsSparseAppendWithReads(b *testing.B) {
for i := 0; i < b.N; i++ {
testLevelDBAppendSampleAsSparseAppendWithReads(b)
}
}
var testLevelDBAppendSampleAsPureSingleEntityAppend = buildLevelDBTestPersistence("append_sample_as_pure_single_entity_append", AppendSampleAsPureSingleEntityAppendTests)
func TestLevelDBAppendSampleAsPureSingleEntityAppend(t *testing.T) {
testLevelDBAppendSampleAsPureSingleEntityAppend(t)
}
func BenchmarkLevelDBAppendSampleAsPureSingleEntityAppend(b *testing.B) {
for i := 0; i < b.N; i++ {
testLevelDBAppendSampleAsPureSingleEntityAppend(b)
}
}
func testLevelDBStochastic(t testing.TB) {
persistenceMaker := func() (metric.Persistence, test.Closer) {
temporaryDirectory := test.NewTemporaryDirectory("test_leveldb_stochastic", t)
p, err := NewLevelDBPersistence(temporaryDirectory.Path())
if err != nil {
t.Errorf("Could not start up LevelDB: %q\n", err)
}
return p, temporaryDirectory
}
StochasticTests(persistenceMaker, t)
}
func TestLevelDBStochastic(t *testing.T) {
testLevelDBStochastic(t)
}
func BenchmarkLevelDBStochastic(b *testing.B) {
for i := 0; i < b.N; i++ {
testLevelDBStochastic(b)
}
}
var testMemoryBasicLifecycle = buildMemoryTestPersistence(BasicLifecycleTests)
func TestMemoryBasicLifecycle(t *testing.T) {
testMemoryBasicLifecycle(t)
}
func BenchmarkMemoryBasicLifecycle(b *testing.B) {
for i := 0; i < b.N; i++ {
testMemoryBasicLifecycle(b)
}
}
var testMemoryReadEmpty = buildMemoryTestPersistence(ReadEmptyTests)
func TestMemoryReadEmpty(t *testing.T) {
testMemoryReadEmpty(t)
}
func BenchmarkMemoryReadEmpty(b *testing.B) {
for i := 0; i < b.N; i++ {
testMemoryReadEmpty(b)
}
}
var testMemoryAppendSampleAsPureSparseAppend = buildMemoryTestPersistence(AppendSampleAsPureSparseAppendTests)
func TestMemoryAppendSampleAsPureSparseAppend(t *testing.T) {
testMemoryAppendSampleAsPureSparseAppend(t)
}
func BenchmarkMemoryAppendSampleAsPureSparseAppend(b *testing.B) {
for i := 0; i < b.N; i++ {
testMemoryAppendSampleAsPureSparseAppend(b)
}
}
var testMemoryAppendSampleAsSparseAppendWithReads = buildMemoryTestPersistence(AppendSampleAsSparseAppendWithReadsTests)
func TestMemoryAppendSampleAsSparseAppendWithReads(t *testing.T) {
testMemoryAppendSampleAsSparseAppendWithReads(t)
}
func BenchmarkMemoryAppendSampleAsSparseAppendWithReads(b *testing.B) {
for i := 0; i < b.N; i++ {
testMemoryAppendSampleAsSparseAppendWithReads(b)
}
}
var testMemoryAppendSampleAsPureSingleEntityAppend = buildMemoryTestPersistence(AppendSampleAsPureSingleEntityAppendTests)
func TestMemoryAppendSampleAsPureSingleEntityAppend(t *testing.T) {
testMemoryAppendSampleAsPureSingleEntityAppend(t)
}
func BenchmarkMemoryAppendSampleAsPureSingleEntityAppend(b *testing.B) {
for i := 0; i < b.N; i++ {
testMemoryAppendSampleAsPureSingleEntityAppend(b)
}
}
func testMemoryStochastic(t testing.TB) {
persistenceMaker := func() (metric.Persistence, test.Closer) {
return NewMemorySeriesStorage(MemorySeriesOptions{}), test.NilCloser
}
StochasticTests(persistenceMaker, t)
}
func TestMemoryStochastic(t *testing.T) {
testMemoryStochastic(t)
}
func BenchmarkMemoryStochastic(b *testing.B) {
for i := 0; i < b.N; i++ {
testMemoryStochastic(b)
}
}


@@ -1,131 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"fmt"
"testing"
"time"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/metric"
"github.com/prometheus/prometheus/utility/test"
)
var (
// ``hg clone https://code.google.com/p/go ; cd go ; hg log | tail -n 20``
usEastern, _ = time.LoadLocation("US/Eastern")
testInstant = clientmodel.TimestampFromTime(time.Date(1972, 7, 18, 19, 5, 45, 0, usEastern).In(time.UTC))
)
func testAppendSamples(p metric.Persistence, s *clientmodel.Sample, t testing.TB) {
err := p.AppendSamples(clientmodel.Samples{s})
if err != nil {
t.Fatal(err)
}
}
func buildLevelDBTestPersistencesMaker(name string, t testing.TB) func() (metric.Persistence, test.Closer) {
return func() (metric.Persistence, test.Closer) {
temporaryDirectory := test.NewTemporaryDirectory("get_value_at_time", t)
p, err := NewLevelDBPersistence(temporaryDirectory.Path())
if err != nil {
t.Errorf("Could not start up LevelDB: %q\n", err)
}
return p, temporaryDirectory
}
}
func buildLevelDBTestPersistence(name string, f func(p metric.Persistence, t testing.TB)) func(t testing.TB) {
return func(t testing.TB) {
temporaryDirectory := test.NewTemporaryDirectory(fmt.Sprintf("test_leveldb_%s", name), t)
defer temporaryDirectory.Close()
p, err := NewLevelDBPersistence(temporaryDirectory.Path())
if err != nil {
t.Errorf("Could not create LevelDB Metric Persistence: %q\n", err)
}
defer p.Close()
f(p, t)
}
}
func buildMemoryTestPersistence(f func(p metric.Persistence, t testing.TB)) func(t testing.TB) {
return func(t testing.TB) {
p := NewMemorySeriesStorage(MemorySeriesOptions{})
defer p.Close()
f(p, t)
}
}
type testTieredStorageCloser struct {
storage *TieredStorage
directory test.Closer
}
func (t *testTieredStorageCloser) Close() {
t.storage.Close()
t.directory.Close()
}
func NewTestTieredStorage(t testing.TB) (*TieredStorage, test.Closer) {
directory := test.NewTemporaryDirectory("test_tiered_storage", t)
storage, err := NewTieredStorage(2500, 1000, 5*time.Second, 0, directory.Path())
if err != nil {
if storage != nil {
storage.Close()
}
directory.Close()
t.Fatalf("Error creating storage: %s", err)
}
if storage == nil {
directory.Close()
t.Fatalf("storage == nil")
}
started := make(chan bool)
go storage.Serve(started)
<-started
closer := &testTieredStorageCloser{
storage: storage,
directory: directory,
}
return storage, closer
}
func labelMatchersFromLabelSet(l clientmodel.LabelSet) metric.LabelMatchers {
m := make(metric.LabelMatchers, 0, len(l))
for k, v := range l {
m = append(m, &metric.LabelMatcher{
Type: metric.Equal,
Name: k,
Value: v,
})
}
return m
}


@@ -1,813 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"fmt"
"os"
"sort"
"sync"
"time"
"github.com/golang/glog"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/stats"
"github.com/prometheus/prometheus/storage/metric"
"github.com/prometheus/prometheus/storage/raw/leveldb"
"github.com/prometheus/prometheus/utility"
)
// Constants for instrumentation.
const (
namespace = "prometheus"
operation = "operation"
success = "success"
failure = "failure"
result = "result"
appendSample = "append_sample"
appendSamples = "append_samples"
flushMemory = "flush_memory"
getLabelValuesForLabelName = "get_label_values_for_label_name"
getFingerprintsForLabelMatchers = "get_fingerprints_for_label_matchers"
getMetricForFingerprint = "get_metric_for_fingerprint"
hasIndexMetric = "has_index_metric"
refreshHighWatermarks = "refresh_high_watermarks"
renderView = "render_view"
queue = "queue"
appendToDisk = "append_to_disk"
viewGeneration = "view_generation"
)
var (
storageLatency = prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Namespace: namespace,
Name: "metric_disk_latency_milliseconds",
Help: "Latency for metric disk operations (includes any storage drive even if it is not strictly a disk, e.g. SSD).",
Objectives: []float64{0.01, 0.05, 0.5, 0.90, 0.99},
},
[]string{operation, result},
)
storedSamplesCount = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Name: "stored_samples_total",
Help: "The number of samples that have been stored.",
})
)
func init() {
prometheus.MustRegister(storageLatency)
prometheus.MustRegister(storedSamplesCount)
}
type chunk metric.Values
// TruncateBefore returns a subslice of the original such that samples
// occurring before the provided time are dropped, except that the last
// sample preceding that time is retained, on the assumption that consumers
// may still want the value immediately preceding it. The original slice is
// not mutated.
func (c chunk) TruncateBefore(t clientmodel.Timestamp) chunk {
index := sort.Search(len(c), func(i int) bool {
timestamp := c[i].Timestamp
return !timestamp.Before(t)
})
switch index {
case 0:
return c
case len(c):
return c[len(c)-1:]
default:
return c[index-1:]
}
}
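// Added illustration of the preceding-value semantics of TruncateBefore;
// the samples are hypothetical and this function is not part of the
// original source.
func exampleTruncateBefore() {
	c := chunk{
		{Timestamp: clientmodel.TimestampFromUnix(10), Value: 1},
		{Timestamp: clientmodel.TimestampFromUnix(20), Value: 2},
		{Timestamp: clientmodel.TimestampFromUnix(30), Value: 3},
	}
	// Truncating at t=25 keeps the sample at 20 as the preceding value,
	// so the result covers the samples at 20 and 30.
	c = c.TruncateBefore(clientmodel.TimestampFromUnix(25))
	_ = c
}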
type tieredStorageState uint
const (
tieredStorageStarting tieredStorageState = iota
tieredStorageServing
tieredStorageDraining
tieredStorageStopping
)
// Ignore timeseries in queries that are more stale than this limit.
const stalenessLimit = time.Minute * 5
// TieredStorage both persists samples and generates materialized views for
// queries.
type TieredStorage struct {
// mu is purely used for state transitions.
mu sync.RWMutex
// BUG(matt): This introduces a Law of Demeter violation. Ugh.
DiskStorage *LevelDBPersistence
appendToDiskQueue chan clientmodel.Samples
memoryArena *memorySeriesStorage
memoryTTL time.Duration
flushMemoryInterval time.Duration
ViewQueue chan viewJob
draining chan chan<- bool
state tieredStorageState
memorySemaphore chan bool
wmCache *watermarkCache
Indexer MetricIndexer
flushSema chan bool
dtoSampleKeys *dtoSampleKeyList
sampleKeys *sampleKeyList
queueLength *prometheus.GaugeVec
queueCapacity *prometheus.GaugeVec
}
// viewJob encapsulates a request to extract sample values from the datastore.
type viewJob struct {
builder metric.ViewRequestBuilder
output chan metric.View
abort chan bool
err chan error
stats *stats.TimerGroup
}
const (
tieredMemorySemaphores = 5
watermarkCacheLimit = 1024 * 1024
)
// NewTieredStorage returns a TieredStorage object ready to use.
func NewTieredStorage(
appendToDiskQueueDepth,
viewQueueDepth uint,
flushMemoryInterval time.Duration,
memoryTTL time.Duration,
rootDirectory string,
) (*TieredStorage, error) {
if isDir, _ := utility.IsDir(rootDirectory); !isDir {
if err := os.MkdirAll(rootDirectory, 0755); err != nil {
return nil, fmt.Errorf("could not find or create metrics directory %s: %s", rootDirectory, err)
}
}
diskStorage, err := NewLevelDBPersistence(rootDirectory)
if err != nil {
return nil, err
}
wmCache := &watermarkCache{
C: utility.NewSynchronizedCache(utility.NewLRUCache(watermarkCacheLimit)),
}
memOptions := MemorySeriesOptions{
WatermarkCache: wmCache,
}
s := &TieredStorage{
appendToDiskQueue: make(chan clientmodel.Samples, appendToDiskQueueDepth),
DiskStorage: diskStorage,
draining: make(chan chan<- bool),
flushMemoryInterval: flushMemoryInterval,
memoryArena: NewMemorySeriesStorage(memOptions),
memoryTTL: memoryTTL,
ViewQueue: make(chan viewJob, viewQueueDepth),
memorySemaphore: make(chan bool, tieredMemorySemaphores),
wmCache: wmCache,
flushSema: make(chan bool, 1),
dtoSampleKeys: newDtoSampleKeyList(10),
sampleKeys: newSampleKeyList(10),
queueLength: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "storage_queue_length",
Help: "The number of items in the storage queues.",
},
[]string{queue},
),
queueCapacity: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "storage_queue_capacity",
Help: "The capacity of the storage queues.",
},
[]string{queue},
),
}
s.queueCapacity.WithLabelValues(appendToDisk).Set(float64(appendToDiskQueueDepth))
s.queueCapacity.WithLabelValues(viewGeneration).Set(float64(viewQueueDepth))
for i := 0; i < tieredMemorySemaphores; i++ {
s.memorySemaphore <- true
}
return s, nil
}
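// Added construction sketch; the parameter values and root directory are
// illustrative only (compare NewTestTieredStorage in the test helpers).
func exampleNewTieredStorage() {
	storage, err := NewTieredStorage(
		2500,           // appendToDiskQueueDepth
		1000,           // viewQueueDepth
		5*time.Second,  // flushMemoryInterval
		0,              // memoryTTL: a zero TTL flushes everything on each cycle
		"/tmp/metrics", // hypothetical root directory
	)
	if err != nil {
		glog.Fatal(err)
	}
	started := make(chan bool)
	go storage.Serve(started)
	<-started
	defer storage.Close()
}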
// AppendSamples enqueues Samples for storage.
func (t *TieredStorage) AppendSamples(samples clientmodel.Samples) (err error) {
t.mu.RLock()
defer t.mu.RUnlock()
if t.state != tieredStorageServing {
return fmt.Errorf("storage is not serving")
}
t.memoryArena.AppendSamples(samples)
storedSamplesCount.Add(float64(len(samples)))
return
}
// Drain stops the storage subsystem, flushing all pending operations.
func (t *TieredStorage) Drain(drained chan<- bool) {
t.mu.Lock()
defer t.mu.Unlock()
t.drain(drained)
}
func (t *TieredStorage) drain(drained chan<- bool) {
if t.state >= tieredStorageDraining {
panic("Illegal State: Supplemental drain requested.")
}
t.state = tieredStorageDraining
glog.Info("Triggering drain...")
t.draining <- (drained)
}
// NewViewRequestBuilder furnishes a ViewRequestBuilder for specifying which
// queries to perform.
func (t *TieredStorage) NewViewRequestBuilder() metric.ViewRequestBuilder {
return &viewRequestBuilder{storage: t}
}
// makeView materializes a View according to a ViewRequestBuilder, subject to a
// timeout.
func (t *TieredStorage) makeView(builder metric.ViewRequestBuilder, deadline time.Duration, queryStats *stats.TimerGroup) (metric.View, error) {
t.mu.RLock()
defer t.mu.RUnlock()
if t.state != tieredStorageServing {
return nil, fmt.Errorf("storage is not serving")
}
// The result channel needs a one-element buffer in case we have timed
// out in makeView, but the view rendering still completes afterwards
// and writes to the channel.
result := make(chan metric.View, 1)
// The abort channel needs a one-element buffer in case the view
// rendering has already exited and doesn't consume from the channel
// anymore.
abortChan := make(chan bool, 1)
errChan := make(chan error)
queryStats.GetTimer(stats.ViewQueueTime).Start()
t.ViewQueue <- viewJob{
builder: builder,
output: result,
abort: abortChan,
err: errChan,
stats: queryStats,
}
select {
case view := <-result:
return view, nil
case err := <-errChan:
return nil, err
case <-time.After(deadline):
abortChan <- true
return nil, fmt.Errorf("fetching query data timed out after %s", deadline)
}
}
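// Added standalone sketch of the buffered-channel timeout pattern used by
// makeView above; the worker and payload here are generic placeholders
// rather than the real view-rendering types.
func exampleViewTimeout() {
	result := make(chan int, 1) // buffered so a late worker can still send
	abort := make(chan bool, 1) // buffered so the signal never blocks
	go func() {
		// Real work would periodically check len(abort) > 0 and bail out.
		result <- 42
	}()
	select {
	case v := <-result:
		fmt.Println("got", v)
	case <-time.After(time.Second):
		abort <- true // does not block even if the worker already exited
	}
}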
// Serve starts serving requests.
func (t *TieredStorage) Serve(started chan<- bool) {
t.mu.Lock()
if t.state != tieredStorageStarting {
panic("Illegal State: Attempted to restart TieredStorage.")
}
t.state = tieredStorageServing
t.mu.Unlock()
flushMemoryTicker := time.NewTicker(t.flushMemoryInterval)
defer flushMemoryTicker.Stop()
started <- true
for {
select {
case <-flushMemoryTicker.C:
select {
case t.flushSema <- true:
go func() {
t.flushMemory(t.memoryTTL)
<-t.flushSema
}()
default:
glog.Warning("Backlogging on flush...")
}
case viewRequest := <-t.ViewQueue:
<-t.memorySemaphore
viewRequest.stats.GetTimer(stats.ViewQueueTime).Stop()
go t.renderView(viewRequest)
case drainingDone := <-t.draining:
t.Flush()
drainingDone <- true
return
}
}
}
// Flush flushes all samples to disk.
func (t *TieredStorage) Flush() {
t.flushSema <- true
t.flushMemory(0)
<-t.flushSema
}
func (t *TieredStorage) flushMemory(ttl time.Duration) {
flushOlderThan := clientmodel.Now().Add(-1 * ttl)
glog.Info("Flushing samples to disk...")
t.memoryArena.Flush(flushOlderThan, t.appendToDiskQueue)
queueLength := len(t.appendToDiskQueue)
if queueLength > 0 {
samples := clientmodel.Samples{}
for i := 0; i < queueLength; i++ {
chunk := <-t.appendToDiskQueue
samples = append(samples, chunk...)
}
glog.Infof("Writing %d samples...", len(samples))
t.DiskStorage.AppendSamples(samples)
}
t.memoryArena.Evict(flushOlderThan)
glog.Info("Done flushing.")
}
// Close stops serving, flushes all pending operations, and frees all resources.
func (t *TieredStorage) Close() {
t.mu.Lock()
defer t.mu.Unlock()
t.close()
}
func (t *TieredStorage) close() {
if t.state == tieredStorageStopping {
panic("Illegal State: Attempted to restop TieredStorage.")
}
drained := make(chan bool)
t.drain(drained)
<-drained
t.memoryArena.Close()
t.DiskStorage.Close()
// BUG(matt): There is a probability that pending items may hang here
// and not get flushed.
close(t.appendToDiskQueue)
close(t.ViewQueue)
t.wmCache.Clear()
t.dtoSampleKeys.Close()
t.sampleKeys.Close()
t.state = tieredStorageStopping
}
func (t *TieredStorage) seriesTooOld(f *clientmodel.Fingerprint, i clientmodel.Timestamp) (bool, error) {
// BUG(julius): Make this configurable by query layer.
i = i.Add(-stalenessLimit)
wm, cacheHit, _ := t.wmCache.Get(f)
if !cacheHit {
if t.memoryArena.HasFingerprint(f) {
samples := t.memoryArena.CloneSamples(f)
if len(samples) > 0 {
newest := samples[len(samples)-1].Timestamp
t.wmCache.Put(f, &watermarks{High: newest})
return newest.Before(i), nil
}
}
highTime, diskHit, err := t.DiskStorage.MetricHighWatermarks.Get(f)
if err != nil {
return false, err
}
if diskHit {
t.wmCache.Put(f, &watermarks{High: highTime})
return highTime.Before(i), nil
}
t.wmCache.Put(f, &watermarks{})
return true, nil
}
return wm.High.Before(i), nil
}
func (t *TieredStorage) renderView(viewJob viewJob) {
// Telemetry.
var err error
begin := time.Now()
defer func() {
t.memorySemaphore <- true
if err == nil {
storageLatency.With(
prometheus.Labels{operation: renderView, result: success},
).Observe(
float64(time.Since(begin) / time.Millisecond),
)
} else {
storageLatency.With(
prometheus.Labels{operation: renderView, result: failure},
).Observe(
float64(time.Since(begin) / time.Millisecond),
)
}
}()
view := newView()
var iterator leveldb.Iterator
diskPresent := true
firstBlock, _ := t.sampleKeys.Get()
defer t.sampleKeys.Give(firstBlock)
lastBlock, _ := t.sampleKeys.Get()
defer t.sampleKeys.Give(lastBlock)
sampleKeyDto, _ := t.dtoSampleKeys.Get()
defer t.dtoSampleKeys.Give(sampleKeyDto)
defer func() {
// Give back all ops not yet popped.
for viewJob.builder.HasOp() {
giveBackOp(viewJob.builder.PopOp())
}
}()
extractionTimer := viewJob.stats.GetTimer(stats.ViewDataExtractionTime).Start()
for viewJob.builder.HasOp() {
op := viewJob.builder.PopOp()
defer giveBackOp(op)
fp := op.Fingerprint()
old, err := t.seriesTooOld(fp, op.CurrentTime())
if err != nil {
glog.Errorf("Error getting watermark from cache for %s: %s", fp, err)
continue
}
if old {
continue
}
memValues := t.memoryArena.CloneSamples(fp)
for !op.Consumed() {
// Abort the view rendering if the caller (makeView) has timed out.
if len(viewJob.abort) > 0 {
return
}
// Load data value chunk(s) around the current time.
targetTime := op.CurrentTime()
currentChunk := chunk{}
// If we aimed before the oldest value in memory, load more data from disk.
if (len(memValues) == 0 || memValues.FirstTimeAfter(targetTime)) && diskPresent {
if iterator == nil {
// Get a single iterator that will be used for all data extraction
// below.
iterator, _ = t.DiskStorage.MetricSamples.NewIterator(true)
defer iterator.Close()
if diskPresent = iterator.SeekToLast(); diskPresent {
if err := iterator.Key(sampleKeyDto); err != nil {
panic(err)
}
lastBlock.Load(sampleKeyDto)
if !iterator.SeekToFirst() {
diskPresent = false
} else {
if err := iterator.Key(sampleKeyDto); err != nil {
panic(err)
}
firstBlock.Load(sampleKeyDto)
}
}
}
if diskPresent {
diskTimer := viewJob.stats.GetTimer(stats.ViewDiskExtractionTime).Start()
diskValues, expired := t.loadChunkAroundTime(
iterator,
fp,
targetTime,
firstBlock,
lastBlock,
)
if expired {
diskPresent = false
}
diskTimer.Stop()
// If we aimed past the newest value on disk,
// combine it with the next value from memory.
if len(diskValues) == 0 {
currentChunk = chunk(memValues)
} else {
if len(memValues) > 0 && diskValues.LastTimeBefore(targetTime) {
latestDiskValue := diskValues[len(diskValues)-1:]
currentChunk = append(chunk(latestDiskValue), chunk(memValues)...)
} else {
currentChunk = chunk(diskValues)
}
}
} else {
currentChunk = chunk(memValues)
}
} else {
currentChunk = chunk(memValues)
}
// There's no data at all for this fingerprint, so stop processing.
if len(currentChunk) == 0 {
break
}
currentChunk = currentChunk.TruncateBefore(targetTime)
lastChunkTime := currentChunk[len(currentChunk)-1].Timestamp
if lastChunkTime.After(targetTime) {
targetTime = lastChunkTime
}
if op.CurrentTime().After(targetTime) {
break
}
// Extract all needed data from the current chunk and append the
// extracted samples to the materialized view.
for !op.Consumed() && !op.CurrentTime().After(targetTime) {
view.appendSamples(fp, op.ExtractSamples(metric.Values(currentChunk)))
}
}
}
extractionTimer.Stop()
viewJob.output <- view
return
}
func (t *TieredStorage) loadChunkAroundTime(
iterator leveldb.Iterator,
fingerprint *clientmodel.Fingerprint,
ts clientmodel.Timestamp,
firstBlock,
lastBlock *SampleKey,
) (chunk metric.Values, expired bool) {
if fingerprint.Less(firstBlock.Fingerprint) {
return nil, false
}
if lastBlock.Fingerprint.Less(fingerprint) {
return nil, true
}
seekingKey, _ := t.sampleKeys.Get()
defer t.sampleKeys.Give(seekingKey)
seekingKey.Fingerprint = fingerprint
if fingerprint.Equal(firstBlock.Fingerprint) && ts.Before(firstBlock.FirstTimestamp) {
seekingKey.FirstTimestamp = firstBlock.FirstTimestamp
} else if fingerprint.Equal(lastBlock.Fingerprint) && ts.After(lastBlock.FirstTimestamp) {
seekingKey.FirstTimestamp = lastBlock.FirstTimestamp
} else {
seekingKey.FirstTimestamp = ts
}
dto, _ := t.dtoSampleKeys.Get()
defer t.dtoSampleKeys.Give(dto)
seekingKey.Dump(dto)
if !iterator.Seek(dto) {
return chunk, true
}
var foundValues metric.Values
if err := iterator.Key(dto); err != nil {
panic(err)
}
seekingKey.Load(dto)
if seekingKey.Fingerprint.Equal(fingerprint) {
// Figure out if we need to rewind by one block.
// Imagine the following supertime blocks with time ranges:
//
// Block 1: ft 1000 - lt 1009 <data>
// Block 2: ft 1010 - lt 1019 <data>
//
// If we are aiming to find time 1005, we would first seek to the block with
// supertime 1010, then need to rewind by one block by virtue of LevelDB
// iterator seek behavior.
//
// Only do the rewind if there is another chunk before this one.
if !seekingKey.MayContain(ts) {
postValues := unmarshalValues(iterator.RawValue(), nil)
if !seekingKey.Equal(firstBlock) {
if !iterator.Previous() {
panic("This should never return false.")
}
if err := iterator.Key(dto); err != nil {
panic(err)
}
seekingKey.Load(dto)
if !seekingKey.Fingerprint.Equal(fingerprint) {
return postValues, false
}
foundValues = unmarshalValues(iterator.RawValue(), nil)
foundValues = append(foundValues, postValues...)
return foundValues, false
}
}
foundValues = unmarshalValues(iterator.RawValue(), nil)
return foundValues, false
}
if fingerprint.Less(seekingKey.Fingerprint) {
if !seekingKey.Equal(firstBlock) {
if !iterator.Previous() {
panic("This should never return false.")
}
if err := iterator.Key(dto); err != nil {
panic(err)
}
seekingKey.Load(dto)
if !seekingKey.Fingerprint.Equal(fingerprint) {
return nil, false
}
foundValues = unmarshalValues(iterator.RawValue(), nil)
return foundValues, false
}
}
panic("illegal state: violated sort invariant")
}
// GetAllValuesForLabel gets all label values that are associated with the
// provided label name.
func (t *TieredStorage) GetAllValuesForLabel(labelName clientmodel.LabelName) (clientmodel.LabelValues, error) {
t.mu.RLock()
defer t.mu.RUnlock()
if t.state != tieredStorageServing {
panic("Illegal State: Attempted to query non-running TieredStorage.")
}
diskValues, err := t.DiskStorage.GetAllValuesForLabel(labelName)
if err != nil {
return nil, err
}
memoryValues, err := t.memoryArena.GetAllValuesForLabel(labelName)
if err != nil {
return nil, err
}
valueSet := map[clientmodel.LabelValue]bool{}
values := clientmodel.LabelValues{}
for _, value := range append(diskValues, memoryValues...) {
if !valueSet[value] {
values = append(values, value)
valueSet[value] = true
}
}
return values, nil
}
// GetFingerprintsForLabelMatchers gets all of the metric fingerprints that are
// associated with the provided label matchers.
func (t *TieredStorage) GetFingerprintsForLabelMatchers(matchers metric.LabelMatchers) (clientmodel.Fingerprints, error) {
t.mu.RLock()
defer t.mu.RUnlock()
if t.state != tieredStorageServing {
panic("Illegal State: Attempted to query non-running TieredStorage.")
}
memFingerprints, err := t.memoryArena.GetFingerprintsForLabelMatchers(matchers)
if err != nil {
return nil, err
}
diskFingerprints, err := t.DiskStorage.GetFingerprintsForLabelMatchers(matchers)
if err != nil {
return nil, err
}
fingerprintSet := map[clientmodel.Fingerprint]bool{}
for _, fingerprint := range append(memFingerprints, diskFingerprints...) {
fingerprintSet[*fingerprint] = true
}
fingerprints := clientmodel.Fingerprints{}
for fingerprint := range fingerprintSet {
fpCopy := fingerprint
fingerprints = append(fingerprints, &fpCopy)
}
return fingerprints, nil
}
// GetLabelValuesForLabelName gets all of the label values that are associated
// with a given label name.
func (t *TieredStorage) GetLabelValuesForLabelName(clientmodel.LabelName) (clientmodel.LabelValues, error) {
// TODO(julius): Implement this or decide what to do with this
// Persistence interface method. It's currently unused on the
// TieredStorage, but used on the LevelDBPersistence and the
// memorySeriesStorage.
panic("not implemented")
}
// GetMetricForFingerprint gets the metric associated with the provided
// fingerprint.
func (t *TieredStorage) GetMetricForFingerprint(f *clientmodel.Fingerprint) (clientmodel.Metric, error) {
t.mu.RLock()
defer t.mu.RUnlock()
if t.state != tieredStorageServing {
panic("Illegal State: Attempted to query non-running TieredStorage.")
}
m, err := t.memoryArena.GetMetricForFingerprint(f)
if err != nil {
return nil, err
}
if m == nil {
m, err = t.DiskStorage.GetMetricForFingerprint(f)
t.memoryArena.CreateEmptySeries(m)
}
return m, err
}
// Describe implements prometheus.Collector.
func (t *TieredStorage) Describe(ch chan<- *prometheus.Desc) {
t.queueLength.Describe(ch)
t.queueCapacity.Describe(ch)
}
// Collect implements prometheus.Collector.
func (t *TieredStorage) Collect(ch chan<- prometheus.Metric) {
t.queueLength.WithLabelValues(appendToDisk).Set(float64(len(t.appendToDiskQueue)))
t.queueLength.WithLabelValues(viewGeneration).Set(float64(len(t.ViewQueue)))
t.queueLength.Collect(ch)
t.queueCapacity.Collect(ch)
}
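// Added sketch: since TieredStorage implements prometheus.Collector via
// Describe and Collect above, the storage can be registered directly. The
// directory path is hypothetical.
func exampleRegisterTieredStorage() {
	ts, err := NewTieredStorage(2500, 1000, 5*time.Second, 0, "/tmp/metrics")
	if err == nil {
		prometheus.MustRegister(ts)
	}
}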

File diff suppressed because it is too large


@@ -1,100 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"container/heap"
"time"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/stats"
"github.com/prometheus/prometheus/storage/metric"
)
// viewRequestBuilder contains the various requests for data.
type viewRequestBuilder struct {
storage *TieredStorage
operations ops
}
var getValuesAtTimes = newValueAtTimeList(10 * 1024)
// GetMetricAtTime implements ViewRequestBuilder.
func (v *viewRequestBuilder) GetMetricAtTime(fp *clientmodel.Fingerprint, time clientmodel.Timestamp) {
heap.Push(&v.operations, getValuesAtTimes.Get(fp, time))
}
var getValuesAtIntervals = newValueAtIntervalList(10 * 1024)
// GetMetricAtInterval implements ViewRequestBuilder.
func (v *viewRequestBuilder) GetMetricAtInterval(fp *clientmodel.Fingerprint, from, through clientmodel.Timestamp, interval time.Duration) {
heap.Push(&v.operations, getValuesAtIntervals.Get(fp, from, through, interval))
}
var getValuesAlongRanges = newValueAlongRangeList(10 * 1024)
// GetMetricRange implements ViewRequestBuilder.
func (v *viewRequestBuilder) GetMetricRange(fp *clientmodel.Fingerprint, from, through clientmodel.Timestamp) {
heap.Push(&v.operations, getValuesAlongRanges.Get(fp, from, through))
}
var getValuesAtIntervalAlongRanges = newValueAtIntervalAlongRangeList(10 * 1024)
// GetMetricRangeAtInterval implements ViewRequestBuilder.
func (v *viewRequestBuilder) GetMetricRangeAtInterval(fp *clientmodel.Fingerprint, from, through clientmodel.Timestamp, interval, rangeDuration time.Duration) {
heap.Push(&v.operations, getValuesAtIntervalAlongRanges.Get(fp, from, through, interval, rangeDuration))
}
// Execute implements ViewRequestBuilder.
func (v *viewRequestBuilder) Execute(deadline time.Duration, queryStats *stats.TimerGroup) (metric.View, error) {
return v.storage.makeView(v, deadline, queryStats)
}
// PopOp implements ViewRequestBuilder.
func (v *viewRequestBuilder) PopOp() metric.Op {
return heap.Pop(&v.operations).(metric.Op)
}
// HasOp implements ViewRequestBuilder.
func (v *viewRequestBuilder) HasOp() bool {
return v.operations.Len() > 0
}
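// Added sketch of assembling and executing a request against a running
// TieredStorage. The fingerprint, time, and deadline are illustrative;
// stats.NewTimerGroup and the view accessor GetValueAtTime are assumed
// from their respective packages.
func exampleViewRequest(storage *TieredStorage) {
	builder := storage.NewViewRequestBuilder()
	fp := &clientmodel.Fingerprint{}
	fp.LoadFromString("00000000000000001111-a-4-a")
	builder.GetMetricAtTime(fp, clientmodel.TimestampFromUnix(100))
	view, err := builder.Execute(30*time.Second, stats.NewTimerGroup())
	if err == nil {
		view.GetValueAtTime(fp, clientmodel.TimestampFromUnix(100))
	}
}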
type view struct {
*memorySeriesStorage
}
func (v view) appendSamples(fingerprint *clientmodel.Fingerprint, samples metric.Values) {
v.memorySeriesStorage.appendSamplesWithoutIndexing(fingerprint, samples)
}
func newView() view {
return view{NewMemorySeriesStorage(MemorySeriesOptions{})}
}
func giveBackOp(op interface{}) bool {
switch v := op.(type) {
case *getValuesAtTimeOp:
return getValuesAtTimes.Give(v)
case *getValuesAtIntervalOp:
return getValuesAtIntervals.Give(v)
case *getValuesAlongRangeOp:
return getValuesAlongRanges.Give(v)
case *getValueRangeAtIntervalOp:
return getValuesAtIntervalAlongRanges.Give(v)
default:
panic("unrecognized operation")
}
}


@@ -1,194 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"testing"
"time"
clientmodel "github.com/prometheus/client_golang/model"
)
func testBuilder(t testing.TB) {
type atTime struct {
fingerprint string
time clientmodel.Timestamp
}
type atInterval struct {
fingerprint string
from clientmodel.Timestamp
through clientmodel.Timestamp
interval time.Duration
}
type atRange struct {
fingerprint string
from clientmodel.Timestamp
through clientmodel.Timestamp
}
type in struct {
atTimes []atTime
atIntervals []atInterval
atRanges []atRange
}
type out []struct {
fingerprint string
operations ops
}
var scenarios = []struct {
in in
out out
}{
// Ensure that the fingerprints are sorted in proper order.
{
in: in{
atTimes: []atTime{
{
fingerprint: "0000000000000001111-a-4-a",
time: clientmodel.TimestampFromUnix(100),
},
{
fingerprint: "0000000000000000000-a-4-a",
time: clientmodel.TimestampFromUnix(100),
},
},
},
out: out{
{
fingerprint: "00000000000000000000-a-4-a",
},
{
fingerprint: "00000000000000001111-a-4-a",
},
},
},
// Ensure that the fingerprint-timestamp pairs are sorted in proper order.
{
in: in{
atTimes: []atTime{
{
fingerprint: "1111-a-4-a",
time: clientmodel.TimestampFromUnix(100),
},
{
fingerprint: "1111-a-4-a",
time: clientmodel.TimestampFromUnix(200),
},
{
fingerprint: "0-a-4-a",
time: clientmodel.TimestampFromUnix(100),
},
{
fingerprint: "0-a-4-a",
time: clientmodel.TimestampFromUnix(0),
},
},
},
out: out{
{
fingerprint: "00000000000000000000-a-4-a",
},
{
fingerprint: "00000000000000000000-a-4-a",
},
{
fingerprint: "00000000000000001111-a-4-a",
},
{
fingerprint: "00000000000000001111-a-4-a",
},
},
},
// Ensure grouping of operations
{
in: in{
atTimes: []atTime{
{
fingerprint: "1111-a-4-a",
time: clientmodel.TimestampFromUnix(100),
},
},
atRanges: []atRange{
{
fingerprint: "1111-a-4-a",
from: clientmodel.TimestampFromUnix(100),
through: clientmodel.TimestampFromUnix(1000),
},
{
fingerprint: "1111-a-4-a",
from: clientmodel.TimestampFromUnix(100),
through: clientmodel.TimestampFromUnix(9000),
},
},
},
out: out{
{
fingerprint: "00000000000000001111-a-4-a",
},
{
fingerprint: "00000000000000001111-a-4-a",
},
{
fingerprint: "00000000000000001111-a-4-a",
},
},
},
}
for i, scenario := range scenarios {
builder := &viewRequestBuilder{}
for _, atTime := range scenario.in.atTimes {
fingerprint := &clientmodel.Fingerprint{}
fingerprint.LoadFromString(atTime.fingerprint)
builder.GetMetricAtTime(fingerprint, atTime.time)
}
for _, atInterval := range scenario.in.atIntervals {
fingerprint := &clientmodel.Fingerprint{}
fingerprint.LoadFromString(atInterval.fingerprint)
builder.GetMetricAtInterval(fingerprint, atInterval.from, atInterval.through, atInterval.interval)
}
for _, atRange := range scenario.in.atRanges {
fingerprint := &clientmodel.Fingerprint{}
fingerprint.LoadFromString(atRange.fingerprint)
builder.GetMetricRange(fingerprint, atRange.from, atRange.through)
}
for j, job := range scenario.out {
got := builder.PopOp()
if got.Fingerprint().String() != job.fingerprint {
t.Errorf("%d.%d. expected fingerprint %s, got %s", i, j, job.fingerprint, got.Fingerprint())
}
}
if builder.HasOp() {
t.Error("Expected builder to have no scan jobs left.")
}
}
}
func TestBuilder(t *testing.T) {
testBuilder(t)
}
func BenchmarkBuilder(b *testing.B) {
for i := 0; i < b.N; i++ {
testBuilder(b)
}
}


@@ -1,189 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tiered
import (
"code.google.com/p/goprotobuf/proto"
clientmodel "github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/raw"
"github.com/prometheus/prometheus/storage/raw/leveldb"
"github.com/prometheus/prometheus/utility"
dto "github.com/prometheus/prometheus/model/generated"
)
type watermarks struct {
High clientmodel.Timestamp
}
func (w *watermarks) load(d *dto.MetricHighWatermark) {
w.High = clientmodel.TimestampFromUnix(d.GetTimestamp())
}
func (w *watermarks) dump(d *dto.MetricHighWatermark) {
d.Reset()
d.Timestamp = proto.Int64(w.High.Unix())
}
// A FingerprintHighWatermarkMapping is used for batch updates of many high
// watermarks in a database.
type FingerprintHighWatermarkMapping map[clientmodel.Fingerprint]clientmodel.Timestamp
// HighWatermarker models a high-watermark database.
type HighWatermarker interface {
raw.Database
raw.ForEacher
raw.Pruner
UpdateBatch(FingerprintHighWatermarkMapping) error
Get(*clientmodel.Fingerprint) (t clientmodel.Timestamp, ok bool, err error)
}
// LevelDBHighWatermarker is an implementation of HighWatermarker backed by
// leveldb.
type LevelDBHighWatermarker struct {
*leveldb.LevelDBPersistence
}
// Get implements HighWatermarker.
func (w *LevelDBHighWatermarker) Get(f *clientmodel.Fingerprint) (t clientmodel.Timestamp, ok bool, err error) {
k := &dto.Fingerprint{}
dumpFingerprint(k, f)
v := &dto.MetricHighWatermark{}
ok, err = w.LevelDBPersistence.Get(k, v)
if err != nil {
return t, ok, err
}
if !ok {
return clientmodel.TimestampFromUnix(0), ok, nil
}
t = clientmodel.TimestampFromUnix(v.GetTimestamp())
return t, true, nil
}
// UpdateBatch implements HighWatermarker.
func (w *LevelDBHighWatermarker) UpdateBatch(m FingerprintHighWatermarkMapping) error {
batch := leveldb.NewBatch()
defer batch.Close()
for fp, t := range m {
existing, present, err := w.Get(&fp)
if err != nil {
return err
}
k := &dto.Fingerprint{}
dumpFingerprint(k, &fp)
v := &dto.MetricHighWatermark{}
if !present {
v.Timestamp = proto.Int64(t.Unix())
batch.Put(k, v)
continue
}
// BUG(matt): Replace this with watermark management.
if t.After(existing) {
v.Timestamp = proto.Int64(t.Unix())
batch.Put(k, v)
}
}
return w.LevelDBPersistence.Commit(batch)
}
// NewLevelDBHighWatermarker returns a LevelDBHighWatermarker ready to use.
func NewLevelDBHighWatermarker(o leveldb.LevelDBOptions) (*LevelDBHighWatermarker, error) {
s, err := leveldb.NewLevelDBPersistence(o)
if err != nil {
return nil, err
}
return &LevelDBHighWatermarker{
LevelDBPersistence: s,
}, nil
}
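// A minimal usage sketch of the high-watermark types above; not part of
// the original file. It reuses this file's imports; the function name,
// path, and option values are hypothetical, and the LevelDBOptions fields
// are the ones shown later in this diff.
func exampleHighWatermarker() error {
	w, err := NewLevelDBHighWatermarker(leveldb.LevelDBOptions{
		Path:           "/tmp/high_watermarks", // hypothetical path
		Name:           "High Watermarks",
		Purpose:        "Example",
		CacheSizeBytes: 1 << 20,
	})
	if err != nil {
		return err
	}
	defer w.Close()

	fp := &clientmodel.Fingerprint{}
	fp.LoadFromString("1111-a-4-a") // fingerprint fixture borrowed from the test above

	// UpdateBatch keeps the later of the stored and offered timestamps.
	if err := w.UpdateBatch(FingerprintHighWatermarkMapping{
		*fp: clientmodel.TimestampFromUnix(1000),
	}); err != nil {
		return err
	}

	_, _, err = w.Get(fp)
	return err
}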
// CurationRemarker models a curation remarker database.
type CurationRemarker interface {
raw.Database
raw.Pruner
Update(*curationKey, clientmodel.Timestamp) error
Get(*curationKey) (t clientmodel.Timestamp, ok bool, err error)
}
// LevelDBCurationRemarker is an implementation of CurationRemarker backed by
// leveldb.
type LevelDBCurationRemarker struct {
*leveldb.LevelDBPersistence
}
// Get implements CurationRemarker.
func (w *LevelDBCurationRemarker) Get(c *curationKey) (t clientmodel.Timestamp, ok bool, err error) {
k := &dto.CurationKey{}
c.dump(k)
v := &dto.CurationValue{}
ok, err = w.LevelDBPersistence.Get(k, v)
if err != nil || !ok {
return clientmodel.TimestampFromUnix(0), ok, err
}
return clientmodel.TimestampFromUnix(v.GetLastCompletionTimestamp()), true, nil
}
// Update implements CurationRemarker.
func (w *LevelDBCurationRemarker) Update(pair *curationKey, t clientmodel.Timestamp) error {
k := &dto.CurationKey{}
pair.dump(k)
return w.LevelDBPersistence.Put(k, &dto.CurationValue{
LastCompletionTimestamp: proto.Int64(t.Unix()),
})
}
// NewLevelDBCurationRemarker returns a LevelDBCurationRemarker ready to use.
func NewLevelDBCurationRemarker(o leveldb.LevelDBOptions) (*LevelDBCurationRemarker, error) {
s, err := leveldb.NewLevelDBPersistence(o)
if err != nil {
return nil, err
}
return &LevelDBCurationRemarker{
LevelDBPersistence: s,
}, nil
}
type watermarkCache struct {
C utility.Cache
}
func (c *watermarkCache) Get(f *clientmodel.Fingerprint) (*watermarks, bool, error) {
v, ok, err := c.C.Get(*f)
if ok {
return v.(*watermarks), ok, err
}
return nil, ok, err
}
func (c *watermarkCache) Put(f *clientmodel.Fingerprint, v *watermarks) (bool, error) {
return c.C.Put(*f, v)
}
func (c *watermarkCache) Clear() (bool, error) {
return c.C.Clear()
}

@@ -1,89 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package raw
import (
"code.google.com/p/goprotobuf/proto"
"github.com/prometheus/prometheus/storage"
)
// Database provides a few very basic methods to manage a database and
// inquire about its state.
type Database interface {
	// Close reaps all of the underlying system resources associated with
	// this database. For databases that don't need that kind of clean-up,
	// it is implemented as a no-op, so that clients can always call Close
	// 'just in case' without having to reason about it.
Close() error
// State reports the state of the database as a DatabaseState object.
State() *DatabaseState
// Size returns the total size of the database in bytes. The number may
// be an approximation, depending on the underlying database type.
Size() (uint64, error)
}
// ForEacher is implemented by databases that can be iterated through.
type ForEacher interface {
// ForEach is responsible for iterating through all records in the
// database until one of the following conditions is met:
//
// 1.) A system anomaly in the database scan.
// 2.) The last record in the database is reached.
// 3.) A FilterResult of STOP is emitted by the Filter.
//
// Decoding errors for an entity cause that entity to be skipped.
ForEach(storage.RecordDecoder, storage.RecordFilter, storage.RecordOperator) (scannedEntireCorpus bool, err error)
}
// Pruner is implemented by a database that can be pruned in some way.
type Pruner interface {
Prune()
}
// Persistence models a key-value store for bytes that supports various
// additional operations.
type Persistence interface {
Database
ForEacher
// Has informs the user whether a given key exists in the database.
Has(key proto.Message) (bool, error)
// Get populates 'value' with the value of 'key', if present, in which
// case 'present' is returned as true.
Get(key, value proto.Message) (present bool, err error)
// Drop removes the key from the database.
Drop(key proto.Message) error
// Put sets the key to a given value.
Put(key, value proto.Message) error
// PutRaw sets the key to a given raw bytes value.
PutRaw(key proto.Message, value []byte) error
// Commit applies the Batch operations to the database.
Commit(Batch) error
}
// Batch models a pool of mutations for the database that can be committed
// en masse. The interface makes no guarantees about the atomicity with
// which the batched mutations take effect.
type Batch interface {
// Close reaps all of the underlying system resources associated with
// this batch mutation.
Close()
// Put follows the same protocol as Persistence.Put.
Put(key, value proto.Message)
// PutRaw follows the same protocol as Persistence.PutRaw.
PutRaw(key proto.Message, value []byte)
// Drop follows the same protocol as Persistence.Drop.
Drop(key proto.Message)
}
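// A minimal sketch of the Persistence protocol; not part of the original
// file. The function name is hypothetical, and the concrete key and value
// messages are left to the caller.
func putAndCheck(p Persistence, key, value proto.Message) (bool, error) {
	if err := p.Put(key, value); err != nil {
		return false, err
	}
	// Has is equivalent to a Get that discards the value.
	return p.Has(key)
}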

@@ -1,88 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package leveldb
import (
"fmt"
"code.google.com/p/goprotobuf/proto"
"github.com/jmhodges/levigo"
)
type batch struct {
batch *levigo.WriteBatch
drops uint32
puts uint32
}
// NewBatch returns a fully allocated batch object.
func NewBatch() *batch {
return &batch{
batch: levigo.NewWriteBatch(),
}
}
func (b *batch) Drop(key proto.Message) {
buf, _ := buffers.Get()
defer buffers.Give(buf)
if err := buf.Marshal(key); err != nil {
panic(err)
}
b.batch.Delete(buf.Bytes())
b.drops++
}
func (b *batch) Put(key, value proto.Message) {
keyBuf, _ := buffers.Get()
defer buffers.Give(keyBuf)
if err := keyBuf.Marshal(key); err != nil {
panic(err)
}
valBuf, _ := buffers.Get()
defer buffers.Give(valBuf)
if err := valBuf.Marshal(value); err != nil {
panic(err)
}
b.batch.Put(keyBuf.Bytes(), valBuf.Bytes())
b.puts++
}
func (b *batch) PutRaw(key proto.Message, value []byte) {
keyBuf, _ := buffers.Get()
defer buffers.Give(keyBuf)
if err := keyBuf.Marshal(key); err != nil {
panic(err)
}
b.batch.Put(keyBuf.Bytes(), value)
b.puts++
}
func (b *batch) Close() {
b.batch.Close()
}
func (b *batch) String() string {
return fmt.Sprintf("LevelDB batch with %d puts and %d drops.", b.puts, b.drops)
}
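// A minimal sketch of the batch lifecycle; not part of the original file.
// The function name is hypothetical; LevelDBPersistence is defined
// elsewhere in this package (see persistence.go later in this diff).
func commitPair(p *LevelDBPersistence, key, value proto.Message) error {
	b := NewBatch()
	defer b.Close() // reap the underlying levigo write batch

	b.Put(key, value)
	// Commit type-asserts its raw.Batch argument back to *batch.
	return p.Commit(b)
}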

@@ -1,46 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package leveldb
import (
"github.com/prometheus/prometheus/utility"
"code.google.com/p/goprotobuf/proto"
)
var buffers = newBufferList(50)
type bufferList struct {
l utility.FreeList
}
func (l *bufferList) Get() (*proto.Buffer, bool) {
if v, ok := l.l.Get(); ok {
return v.(*proto.Buffer), ok
}
return proto.NewBuffer(make([]byte, 0, 4096)), false
}
func (l *bufferList) Give(v *proto.Buffer) bool {
v.Reset()
return l.l.Give(v)
}
func newBufferList(cap int) *bufferList {
return &bufferList{
l: utility.NewFreeList(cap),
}
}
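// A minimal sketch of the checkout/return discipline used with the free
// list above; not part of the original file, and the function name is
// hypothetical.
func marshalWithPooledBuffer(m proto.Message) ([]byte, error) {
	buf, _ := buffers.Get() // the bool reports whether this was a free-list hit
	defer buffers.Give(buf) // Reset the buffer and offer it back for reuse

	if err := buf.Marshal(m); err != nil {
		return nil, err
	}
	// Copy the bytes out: the buffer's backing array is recycled once
	// Give runs.
	out := make([]byte, len(buf.Bytes()))
	copy(out, buf.Bytes())
	return out, nil
}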

@@ -1,24 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package leveldb
import (
"testing"
"github.com/prometheus/prometheus/storage/raw"
)
// TestInterfaceAdherence asserts at compile time that LevelDBPersistence
// satisfies raw.Persistence; the assignment to the blank identifier fails
// the build if the interface is not implemented.
func TestInterfaceAdherence(t *testing.T) {
	var _ raw.Persistence = &LevelDBPersistence{}
}

@@ -1,41 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package leveldb
import (
"code.google.com/p/goprotobuf/proto"
)
// TODO: Evaluate whether to use coding.Encoder for the keys and values
// instead of raw bytes, for consistency reasons.

// Iterator provides methods to iterate through a LevelDB.
type Iterator interface {
Error() error
Valid() bool
SeekToFirst() bool
SeekToLast() bool
Seek(proto.Message) bool
Next() bool
Previous() bool
Key(proto.Message) error
RawValue() []byte
Close() error
rawKey() []byte
}

@@ -1,519 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package leveldb
import (
"fmt"
"time"
"code.google.com/p/goprotobuf/proto"
"github.com/jmhodges/levigo"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/storage/raw"
)
// LevelDBPersistence is a disk-backed sorted key-value store. It implements the
// interfaces raw.Database, raw.ForEacher, raw.Pruner, and raw.Persistence.
type LevelDBPersistence struct {
path string
name string
purpose string
cache *levigo.Cache
filterPolicy *levigo.FilterPolicy
options *levigo.Options
storage *levigo.DB
readOptions *levigo.ReadOptions
writeOptions *levigo.WriteOptions
}
// levigoIterator wraps the LevelDB resources in a convenient manner for
// uniform resource access and closing through this package's Iterator
// protocol.
type levigoIterator struct {
// iterator is the receiver of most proxied operation calls.
iterator *levigo.Iterator
// readOptions is only set if the iterator is a snapshot of an
// underlying database. This signals that it needs to be explicitly
// reaped upon the end of this iterator's life.
readOptions *levigo.ReadOptions
// snapshot is only set if the iterator is a snapshot of an underlying
// database. This signals that it needs to be explicitly reaped upon
// the end of this iterator's life.
snapshot *levigo.Snapshot
// storage is only set if the iterator is a snapshot of an underlying
// database. This signals that it needs to be explicitly reaped upon
// the end of this iterator's life. The snapshot must be freed in
// the context of an actual database.
storage *levigo.DB
// closed indicates whether the iterator has been closed before.
closed bool
// valid indicates whether the iterator may be used. If a LevelDB
// iterator ever becomes invalid, it must be disposed of and cannot be
// reused.
valid bool
// creationTime provides the time at which the iterator was made.
creationTime time.Time
}
func (i levigoIterator) String() string {
valid := "valid"
open := "open"
snapshotted := "snapshotted"
if i.closed {
open = "closed"
}
if !i.valid {
valid = "invalid"
}
if i.snapshot == nil {
snapshotted = "unsnapshotted"
}
return fmt.Sprintf("levigoIterator created at %s that is %s and %s and %s", i.creationTime, open, valid, snapshotted)
}
func (i *levigoIterator) Close() error {
if i.closed {
return nil
}
if i.iterator != nil {
i.iterator.Close()
}
if i.readOptions != nil {
i.readOptions.Close()
}
if i.snapshot != nil {
i.storage.ReleaseSnapshot(i.snapshot)
}
// Explicitly clear the pointers to break any reference cycles, however
// unlikely they are.
i.iterator = nil
i.readOptions = nil
i.snapshot = nil
i.storage = nil
i.closed = true
i.valid = false
return nil
}
func (i *levigoIterator) Seek(m proto.Message) bool {
buf, _ := buffers.Get()
defer buffers.Give(buf)
if err := buf.Marshal(m); err != nil {
panic(err)
}
i.iterator.Seek(buf.Bytes())
i.valid = i.iterator.Valid()
return i.valid
}
func (i *levigoIterator) SeekToFirst() bool {
i.iterator.SeekToFirst()
i.valid = i.iterator.Valid()
return i.valid
}
func (i *levigoIterator) SeekToLast() bool {
i.iterator.SeekToLast()
i.valid = i.iterator.Valid()
return i.valid
}
func (i *levigoIterator) Next() bool {
i.iterator.Next()
i.valid = i.iterator.Valid()
return i.valid
}
func (i *levigoIterator) Previous() bool {
i.iterator.Prev()
i.valid = i.iterator.Valid()
return i.valid
}
func (i *levigoIterator) rawKey() (key []byte) {
return i.iterator.Key()
}
func (i *levigoIterator) Error() (err error) {
return i.iterator.GetError()
}
func (i *levigoIterator) Key(m proto.Message) error {
buf, _ := buffers.Get()
defer buffers.Give(buf)
buf.SetBuf(i.iterator.Key())
return buf.Unmarshal(m)
}
func (i *levigoIterator) RawValue() []byte {
return i.iterator.Value()
}
func (i *levigoIterator) Valid() bool {
return i.valid
}
// Compression defines the compression mode.
type Compression uint
// Possible compression modes.
const (
Snappy Compression = iota
Uncompressed
)
// LevelDBOptions bundles options needed to create a LevelDBPersistence object.
type LevelDBOptions struct {
Path string
Name string
Purpose string
CacheSizeBytes int
OpenFileAllowance int
FlushOnMutate bool
UseParanoidChecks bool
Compression Compression
}
// NewLevelDBPersistence returns an initialized LevelDBPersistence object,
// created with the given options.
func NewLevelDBPersistence(o LevelDBOptions) (*LevelDBPersistence, error) {
options := levigo.NewOptions()
options.SetCreateIfMissing(true)
options.SetParanoidChecks(o.UseParanoidChecks)
compression := levigo.SnappyCompression
if o.Compression == Uncompressed {
compression = levigo.NoCompression
}
options.SetCompression(compression)
cache := levigo.NewLRUCache(o.CacheSizeBytes)
options.SetCache(cache)
filterPolicy := levigo.NewBloomFilter(10)
options.SetFilterPolicy(filterPolicy)
options.SetMaxOpenFiles(o.OpenFileAllowance)
storage, err := levigo.Open(o.Path, options)
if err != nil {
return nil, err
}
readOptions := levigo.NewReadOptions()
writeOptions := levigo.NewWriteOptions()
writeOptions.SetSync(o.FlushOnMutate)
return &LevelDBPersistence{
path: o.Path,
name: o.Name,
purpose: o.Purpose,
cache: cache,
filterPolicy: filterPolicy,
options: options,
readOptions: readOptions,
writeOptions: writeOptions,
storage: storage,
}, nil
}
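// A minimal sketch of wiring up LevelDBOptions; not part of the original
// file. The function name and all values below are hypothetical
// illustrations.
func openExample() (*LevelDBPersistence, error) {
	return NewLevelDBPersistence(LevelDBOptions{
		Path:              "/tmp/example_db", // hypothetical location
		Name:              "Example",
		Purpose:           "Illustration",
		CacheSizeBytes:    16 * 1024 * 1024,
		OpenFileAllowance: 64,
		FlushOnMutate:     false,  // don't sync on every write
		UseParanoidChecks: false,
		Compression:       Snappy, // the zero value, i.e. the default
	})
}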
// Close implements raw.Persistence (and raw.Database).
func (l *LevelDBPersistence) Close() error {
// These are deferred to take advantage of forced closing in case of
// stack unwinding due to anomalies.
defer func() {
if l.filterPolicy != nil {
l.filterPolicy.Close()
}
}()
defer func() {
if l.cache != nil {
l.cache.Close()
}
}()
defer func() {
if l.options != nil {
l.options.Close()
}
}()
defer func() {
if l.readOptions != nil {
l.readOptions.Close()
}
}()
defer func() {
if l.writeOptions != nil {
l.writeOptions.Close()
}
}()
defer func() {
if l.storage != nil {
l.storage.Close()
}
}()
return nil
}
// Get implements raw.Persistence.
func (l *LevelDBPersistence) Get(k, v proto.Message) (bool, error) {
buf, _ := buffers.Get()
defer buffers.Give(buf)
if err := buf.Marshal(k); err != nil {
panic(err)
}
raw, err := l.storage.Get(l.readOptions, buf.Bytes())
if err != nil {
return false, err
}
if raw == nil {
return false, nil
}
if v == nil {
return true, nil
}
buf.SetBuf(raw)
if err := buf.Unmarshal(v); err != nil {
return true, err
}
return true, nil
}
// Has implements raw.Persistence.
func (l *LevelDBPersistence) Has(k proto.Message) (has bool, err error) {
return l.Get(k, nil)
}
// Drop implements raw.Persistence.
func (l *LevelDBPersistence) Drop(k proto.Message) error {
buf, _ := buffers.Get()
defer buffers.Give(buf)
if err := buf.Marshal(k); err != nil {
panic(err)
}
return l.storage.Delete(l.writeOptions, buf.Bytes())
}
// Put implements raw.Persistence.
func (l *LevelDBPersistence) Put(k, v proto.Message) error {
keyBuf, _ := buffers.Get()
defer buffers.Give(keyBuf)
if err := keyBuf.Marshal(k); err != nil {
panic(err)
}
valBuf, _ := buffers.Get()
defer buffers.Give(valBuf)
if err := valBuf.Marshal(v); err != nil {
panic(err)
}
return l.storage.Put(l.writeOptions, keyBuf.Bytes(), valBuf.Bytes())
}
// PutRaw implements raw.Persistence.
func (l *LevelDBPersistence) PutRaw(key proto.Message, value []byte) error {
keyBuf, _ := buffers.Get()
defer buffers.Give(keyBuf)
if err := keyBuf.Marshal(key); err != nil {
panic(err)
}
return l.storage.Put(l.writeOptions, keyBuf.Bytes(), value)
}
// Commit implements raw.Persistence.
func (l *LevelDBPersistence) Commit(b raw.Batch) (err error) {
// XXX: This is a wart to clean up later. Ideally, after doing
// extensive tests, we could create a Batch struct that journals pending
// operations which the given Persistence implementation could convert
// to its specific commit requirements.
batch, ok := b.(*batch)
if !ok {
panic("leveldb.batch expected")
}
return l.storage.Write(l.writeOptions, batch.batch)
}
// Prune implements raw.Pruner. It compacts the entire keyspace of the database.
//
// Beware that it would probably be imprudent to run this on a live user-facing
// server due to latency implications.
func (l *LevelDBPersistence) Prune() {
// Magic values per https://code.google.com/p/leveldb/source/browse/include/leveldb/db.h#131.
keyspace := levigo.Range{
Start: nil,
Limit: nil,
}
l.storage.CompactRange(keyspace)
}
// Size returns the approximate size the entire database takes on disk (in
// bytes). It implements the raw.Database interface.
func (l *LevelDBPersistence) Size() (uint64, error) {
iterator, err := l.NewIterator(false)
if err != nil {
return 0, err
}
defer iterator.Close()
if !iterator.SeekToFirst() {
return 0, fmt.Errorf("could not seek to first key")
}
keyspace := levigo.Range{}
keyspace.Start = iterator.rawKey()
if !iterator.SeekToLast() {
return 0, fmt.Errorf("could not seek to last key")
}
keyspace.Limit = iterator.rawKey()
sizes := l.storage.GetApproximateSizes([]levigo.Range{keyspace})
total := uint64(0)
for _, size := range sizes {
total += size
}
return total, nil
}
// NewIterator creates a new levigoIterator, which follows the Iterator
// interface.
//
// Important notes:
//
// For each of the iterator methods that have a return signature of (ok bool),
// if ok == false, the iterator may not be used any further and must be closed.
// Further work with the database requires the creation of a new iterator. This
// is due to LevelDB and Levigo design. Please refer to Jeff and Sanjay's notes
// in the LevelDB documentation for this behavior's rationale.
//
// The returned iterator must explicitly be closed; otherwise non-managed memory
// will be leaked.
//
// The iterator is optionally snapshotable.
func (l *LevelDBPersistence) NewIterator(snapshotted bool) (Iterator, error) {
var (
snapshot *levigo.Snapshot
readOptions *levigo.ReadOptions
iterator *levigo.Iterator
)
if snapshotted {
snapshot = l.storage.NewSnapshot()
readOptions = levigo.NewReadOptions()
readOptions.SetSnapshot(snapshot)
iterator = l.storage.NewIterator(readOptions)
} else {
iterator = l.storage.NewIterator(l.readOptions)
}
return &levigoIterator{
creationTime: time.Now(),
iterator: iterator,
readOptions: readOptions,
snapshot: snapshot,
storage: l.storage,
}, nil
}
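// A minimal sketch of the scan discipline prescribed by the comment on
// NewIterator; not part of the original file, and the function name is
// hypothetical.
func countRecords(l *LevelDBPersistence) (uint64, error) {
	it, err := l.NewIterator(true) // snapshotted: a stable view of the keyspace
	if err != nil {
		return 0, err
	}
	defer it.Close() // mandatory; otherwise non-managed memory leaks

	var n uint64
	for ok := it.SeekToFirst(); ok; ok = it.Next() {
		if err := it.Error(); err != nil {
			return n, err
		}
		n++
	}
	// Once a method has returned ok == false, the iterator must not be
	// reused; further work needs a fresh iterator.
	return n, nil
}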
// ForEach implements raw.ForEacher.
func (l *LevelDBPersistence) ForEach(decoder storage.RecordDecoder, filter storage.RecordFilter, operator storage.RecordOperator) (scannedEntireCorpus bool, err error) {
iterator, err := l.NewIterator(true)
if err != nil {
return false, err
}
defer iterator.Close()
for valid := iterator.SeekToFirst(); valid; valid = iterator.Next() {
if err = iterator.Error(); err != nil {
return false, err
}
decodedKey, decodeErr := decoder.DecodeKey(iterator.rawKey())
if decodeErr != nil {
continue
}
decodedValue, decodeErr := decoder.DecodeValue(iterator.RawValue())
if decodeErr != nil {
continue
}
switch filter.Filter(decodedKey, decodedValue) {
case storage.Stop:
return
case storage.Skip:
continue
case storage.Accept:
opErr := operator.Operate(decodedKey, decodedValue)
if opErr != nil {
if opErr.Continuable {
continue
}
// Note: this break leaves only the enclosing switch statement,
// not the scan loop, so a non-continuable operator error does
// not abort the scan.
break
}
}
}
return true, nil
}
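// A speculative sketch of driving ForEach; not part of the original file.
// The storage.RecordDecoder, RecordFilter, and RecordOperator definitions
// are not shown in this diff, so the method signatures below are inferred
// from ForEach's call sites and may not match the real storage package.
type passThroughDecoder struct{}

func (passThroughDecoder) DecodeKey(in interface{}) (interface{}, error)   { return in, nil }
func (passThroughDecoder) DecodeValue(in interface{}) (interface{}, error) { return in, nil }

type acceptAll struct{}

func (acceptAll) Filter(key, value interface{}) storage.FilterResult {
	return storage.Accept
}

type recordCounter struct{ n int }

func (c *recordCounter) Operate(key, value interface{}) *storage.OperatorError {
	c.n++
	return nil // a nil OperatorError keeps the scan going
}

func countCorpus(l *LevelDBPersistence) (int, error) {
	c := &recordCounter{}
	if _, err := l.ForEach(passThroughDecoder{}, acceptAll{}, c); err != nil {
		return 0, err
	}
	return c.n, nil
}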

@@ -1,49 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package leveldb
import (
"github.com/prometheus/prometheus/storage/raw"
"github.com/prometheus/prometheus/utility"
)
const (
statsKey = "leveldb.stats"
sstablesKey = "leveldb.sstables"
)
// State returns the DatabaseState. It implements the raw.Database interface and
// sets the following Supplemental entries:
// "Low Level": leveldb property value for "leveldb.stats"
// "SSTable": leveldb property value for "leveldb.sstables"
// "Errors": only set if an error has occurred determining the size
func (l *LevelDBPersistence) State() *raw.DatabaseState {
databaseState := &raw.DatabaseState{
Location: l.path,
Name: l.name,
Purpose: l.purpose,
Supplemental: map[string]string{},
}
if size, err := l.Size(); err != nil {
databaseState.Supplemental["Errors"] = err.Error()
} else {
databaseState.Size = utility.ByteSize(size)
}
databaseState.Supplemental["Low Level"] = l.storage.PropertyValue(statsKey)
databaseState.Supplemental["SSTable"] = l.storage.PropertyValue(sstablesKey)
return databaseState
}

@@ -1,124 +0,0 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"testing"
"code.google.com/p/goprotobuf/proto"
"github.com/prometheus/prometheus/storage/raw/leveldb"
"github.com/prometheus/prometheus/utility/test"
)
const cacheCapacity = 0
type (
// Pair models a prospective (key, value) double that will be committed
// to a database.
Pair interface {
Get() (key proto.Message, value interface{})
}
// Pairs models a list of Pair for disk committing.
Pairs []Pair
// Preparer readies a LevelDB store to a given raw state from the
// fixture definitions passed into it.
Preparer interface {
// Prepare furnishes the database and returns the temporary
// directory that holds it; anomalies are reported to the
// underlying tester.
Prepare(namespace string, f FixtureFactory) test.TemporaryDirectory
}
// FixtureFactory is an iterator emitting fixture data.
FixtureFactory interface {
// HasNext indicates whether the FixtureFactory has more pending
// fixture data to build.
HasNext() (has bool)
// Next emits the next (key, value) double for storage.
Next() (key proto.Message, value interface{})
}
preparer struct {
tester testing.TB
}
cassetteFactory struct {
index int
count int
pairs Pairs
}
)
func (p preparer) Prepare(n string, f FixtureFactory) (t test.TemporaryDirectory) {
t = test.NewTemporaryDirectory(n, p.tester)
persistence, err := leveldb.NewLevelDBPersistence(leveldb.LevelDBOptions{
Path: t.Path(),
CacheSizeBytes: cacheCapacity,
})
if err != nil {
defer t.Close()
p.tester.Fatal(err)
}
defer persistence.Close()
for f.HasNext() {
key, value := f.Next()
switch v := value.(type) {
case proto.Message:
err = persistence.Put(key, v)
case []byte:
err = persistence.PutRaw(key, v)
default:
panic("illegal value type")
}
if err != nil {
defer t.Close()
p.tester.Fatal(err)
}
}
return
}
// HasNext implements FixtureFactory.
func (f cassetteFactory) HasNext() bool {
return f.index < f.count
}
// Next implements FixtureFactory.
func (f *cassetteFactory) Next() (key proto.Message, value interface{}) {
key, value = f.pairs[f.index].Get()
f.index++
return
}
// NewPreparer creates a new Preparer for use in testing scenarios.
func NewPreparer(t testing.TB) Preparer {
return preparer{t}
}
// NewCassetteFactory builds a new FixtureFactory that uses Pairs as the basis
// for generated fixture data.
func NewCassetteFactory(pairs Pairs) FixtureFactory {
return &cassetteFactory{
pairs: pairs,
count: len(pairs),
}
}
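// A minimal sketch of wiring the pieces above together in a test; not part
// of the original file. The dto import (the generated model package used
// elsewhere in this diff) is an assumed addition to this file, and the
// fixture contents are hypothetical; a real test would populate the key
// message meaningfully.
type testPair struct {
	key   proto.Message
	value interface{}
}

func (p testPair) Get() (proto.Message, interface{}) { return p.key, p.value }

func prepareExample(t testing.TB) {
	pairs := Pairs{
		testPair{key: &dto.MetricHighWatermark{}, value: []byte("fixture")},
	}
	dir := NewPreparer(t).Prepare("example", NewCassetteFactory(pairs))
	defer dir.Close()
	// dir wraps a temporary directory that now holds a LevelDB populated
	// with the pairs above.
}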

Some files were not shown because too many files have changed in this diff.