Mirror of https://github.com/prometheus/prometheus.git (synced 2024-11-14 01:24:04 -08:00)
Commit f7e8348a88

* Re-add contexts to storage.Storage.Querier(). These are needed when replacing the storage with a multi-tenant implementation where the tenant is stored in the context. The 1.x query interfaces already had contexts, but they got lost in 2.x.
* Convert promql.Engine to use native contexts.
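For context, a hypothetical sketch of the multi-tenant use case the commit message describes (none of the names below exist in Prometheus; they only illustrate why Querier needs the context): the tenant ID travels in the request context, and the storage picks a per-tenant backend from it.

package multitenant // hypothetical illustration, not part of Prometheus

import (
	"context"
	"fmt"

	"github.com/prometheus/prometheus/storage"
)

// tenantKey is the context key under which the tenant ID is stored.
type tenantKey struct{}

// WithTenant attaches a tenant ID to a request context.
func WithTenant(ctx context.Context, tenant string) context.Context {
	return context.WithValue(ctx, tenantKey{}, tenant)
}

// Storage selects a per-tenant backend from the context handed to Querier,
// which is only possible now that Querier accepts a context again.
// (Only the Querier method of storage.Storage is sketched here.)
type Storage struct {
	Tenants map[string]storage.Storage
}

func (s *Storage) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
	tenant, ok := ctx.Value(tenantKey{}).(string)
	if !ok {
		return nil, fmt.Errorf("no tenant in context")
	}
	backend, ok := s.Tenants[tenant]
	if !ok {
		return nil, fmt.Errorf("unknown tenant %q", tenant)
	}
	return backend.Querier(ctx, mint, maxt)
}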
259 lines · 6.7 KiB · Go
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tsdb

import (
	"context"
	"sync"
	"time"
	"unsafe"

	"github.com/go-kit/kit/log"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/tsdb"
	tsdbLabels "github.com/prometheus/tsdb/labels"
)

// ErrNotReady is returned if the underlying storage is not ready yet.
var ErrNotReady = errors.New("TSDB not ready")

// ReadyStorage implements the Storage interface while allowing the actual
// storage to be set at a later point in time.
type ReadyStorage struct {
	mtx sync.RWMutex
	a   *adapter
}

// Set the storage.
func (s *ReadyStorage) Set(db *tsdb.DB) {
	s.mtx.Lock()
	defer s.mtx.Unlock()

	s.a = &adapter{db: db}
}

// Get the storage.
func (s *ReadyStorage) Get() *tsdb.DB {
	if x := s.get(); x != nil {
		return x.db
	}
	return nil
}

func (s *ReadyStorage) get() *adapter {
	s.mtx.RLock()
	x := s.a
	s.mtx.RUnlock()
	return x
}

// Querier implements the Storage interface.
func (s *ReadyStorage) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
	if x := s.get(); x != nil {
		return x.Querier(ctx, mint, maxt)
	}
	return nil, ErrNotReady
}

// Appender implements the Storage interface.
func (s *ReadyStorage) Appender() (storage.Appender, error) {
	if x := s.get(); x != nil {
		return x.Appender()
	}
	return nil, ErrNotReady
}

// Close implements the Storage interface.
func (s *ReadyStorage) Close() error {
	if x := s.Get(); x != nil {
		return x.Close()
	}
	return nil
}

// Adapter returns a storage.Storage wrapping the given TSDB database.
func Adapter(db *tsdb.DB) storage.Storage {
	return &adapter{db: db}
}

// adapter implements a storage.Storage around TSDB.
type adapter struct {
	db *tsdb.DB
}

// Options of the DB storage.
type Options struct {
	// The interval at which the write ahead log is flushed to disc.
	WALFlushInterval time.Duration

	// The timestamp range of head blocks after which they get persisted.
	// It's the minimum duration of any persisted block.
	MinBlockDuration model.Duration

	// The maximum timestamp range of compacted blocks.
	MaxBlockDuration model.Duration

	// Duration for how long to retain data.
	Retention model.Duration

	// Disable creation and consideration of lockfile.
	NoLockfile bool
}

// Open returns a new storage backed by a TSDB database that is configured for Prometheus.
func Open(path string, l log.Logger, r prometheus.Registerer, opts *Options) (*tsdb.DB, error) {
	// Start with the smallest block duration and create exponential buckets until
	// they exceed the configured maximum block duration.
	rngs := tsdb.ExponentialBlockRanges(int64(time.Duration(opts.MinBlockDuration).Seconds()*1000), 10, 3)

	for i, v := range rngs {
		if v > int64(time.Duration(opts.MaxBlockDuration).Seconds()*1000) {
			rngs = rngs[:i]
			break
		}
	}
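	// For example (illustrative, and assuming tsdb.ExponentialBlockRanges multiplies
	// the minimum range by the step size at each step): with MinBlockDuration=2h and
	// MaxBlockDuration=36h, the candidate ranges are 2h, 6h, 18h, 54h, ... and the
	// loop above trims them to [2h, 6h, 18h].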

	db, err := tsdb.Open(path, l, r, &tsdb.Options{
		WALFlushInterval:  10 * time.Second,
		RetentionDuration: uint64(time.Duration(opts.Retention).Seconds() * 1000),
		BlockRanges:       rngs,
		NoLockfile:        opts.NoLockfile,
	})
	if err != nil {
		return nil, err
	}
	return db, nil
}

func (a adapter) Querier(_ context.Context, mint, maxt int64) (storage.Querier, error) {
	return querier{q: a.db.Querier(mint, maxt)}, nil
}

// Appender returns a new appender against the storage.
func (a adapter) Appender() (storage.Appender, error) {
	return appender{a: a.db.Appender()}, nil
}

// Close closes the storage and all its underlying resources.
func (a adapter) Close() error {
	return a.db.Close()
}

type querier struct {
	q tsdb.Querier
}

func (q querier) Select(oms ...*labels.Matcher) storage.SeriesSet {
	ms := make([]tsdbLabels.Matcher, 0, len(oms))

	for _, om := range oms {
		ms = append(ms, convertMatcher(om))
	}

	return seriesSet{set: q.q.Select(ms...)}
}

func (q querier) LabelValues(name string) ([]string, error) { return q.q.LabelValues(name) }
func (q querier) Close() error                               { return q.q.Close() }

type seriesSet struct {
	set tsdb.SeriesSet
}

func (s seriesSet) Next() bool         { return s.set.Next() }
func (s seriesSet) Err() error         { return s.set.Err() }
func (s seriesSet) At() storage.Series { return series{s: s.set.At()} }

type series struct {
	s tsdb.Series
}

func (s series) Labels() labels.Labels            { return toLabels(s.s.Labels()) }
func (s series) Iterator() storage.SeriesIterator { return storage.SeriesIterator(s.s.Iterator()) }

type appender struct {
	a tsdb.Appender
}

func (a appender) Add(lset labels.Labels, t int64, v float64) (uint64, error) {
	ref, err := a.a.Add(toTSDBLabels(lset), t, v)

	switch errors.Cause(err) {
	case tsdb.ErrNotFound:
		return 0, storage.ErrNotFound
	case tsdb.ErrOutOfOrderSample:
		return 0, storage.ErrOutOfOrderSample
	case tsdb.ErrAmendSample:
		return 0, storage.ErrDuplicateSampleForTimestamp
	case tsdb.ErrOutOfBounds:
		return 0, storage.ErrOutOfBounds
	}
	return ref, err
}

func (a appender) AddFast(_ labels.Labels, ref uint64, t int64, v float64) error {
	err := a.a.AddFast(ref, t, v)

	switch errors.Cause(err) {
	case tsdb.ErrNotFound:
		return storage.ErrNotFound
	case tsdb.ErrOutOfOrderSample:
		return storage.ErrOutOfOrderSample
	case tsdb.ErrAmendSample:
		return storage.ErrDuplicateSampleForTimestamp
	case tsdb.ErrOutOfBounds:
		return storage.ErrOutOfBounds
	}
	return err
}

func (a appender) Commit() error   { return a.a.Commit() }
func (a appender) Rollback() error { return a.a.Rollback() }

func convertMatcher(m *labels.Matcher) tsdbLabels.Matcher {
	switch m.Type {
	case labels.MatchEqual:
		return tsdbLabels.NewEqualMatcher(m.Name, m.Value)

	case labels.MatchNotEqual:
		return tsdbLabels.Not(tsdbLabels.NewEqualMatcher(m.Name, m.Value))

	case labels.MatchRegexp:
		res, err := tsdbLabels.NewRegexpMatcher(m.Name, m.Value)
		if err != nil {
			panic(err)
		}
		return res

	case labels.MatchNotRegexp:
		res, err := tsdbLabels.NewRegexpMatcher(m.Name, m.Value)
		if err != nil {
			panic(err)
		}
		return tsdbLabels.Not(res)
	}
	panic("storage.convertMatcher: invalid matcher type")
}

func toTSDBLabels(l labels.Labels) tsdbLabels.Labels {
	return *(*tsdbLabels.Labels)(unsafe.Pointer(&l))
}

func toLabels(l tsdbLabels.Labels) labels.Labels {
	return *(*labels.Labels)(unsafe.Pointer(&l))
}
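A minimal usage sketch (not part of the mirrored file; the import alias promtsdb, the data directory, and the durations are illustrative, and the import path is assumed to be github.com/prometheus/prometheus/storage/tsdb): a ReadyStorage answers ErrNotReady until Set hands it an opened DB.

package main

import (
	"context"
	"time"

	"github.com/go-kit/kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/model"

	promtsdb "github.com/prometheus/prometheus/storage/tsdb" // assumed path of the file above
)

func main() {
	localStorage := &promtsdb.ReadyStorage{}

	// Until Set is called, Querier reports that the TSDB is not ready.
	if _, err := localStorage.Querier(context.Background(), 0, 0); err != nil {
		// err is promtsdb.ErrNotReady here: the database has not been opened yet.
	}

	db, err := promtsdb.Open("data/", log.NewNopLogger(), prometheus.DefaultRegisterer, &promtsdb.Options{
		WALFlushInterval: 10 * time.Second,
		MinBlockDuration: model.Duration(2 * time.Hour),
		MaxBlockDuration: model.Duration(36 * time.Hour),
		Retention:        model.Duration(15 * 24 * time.Hour),
	})
	if err != nil {
		panic(err)
	}
	localStorage.Set(db)

	// Queries now reach the underlying TSDB; the context is threaded through,
	// so wrappers (e.g. multi-tenant ones) can inspect it.
	q, err := localStorage.Querier(context.Background(), 0, time.Now().UnixNano()/int64(time.Millisecond))
	if err != nil {
		panic(err)
	}
	q.Close()
	localStorage.Close()
}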