mirror of https://github.com/prometheus/prometheus.git
synced 2024-12-24 21:24:05 -08:00

Merge branch 'master' into split_parser
This commit is contained in: commit 4835bbf376
.github/stale.yml (vendored, new file, 56 lines)

@@ -0,0 +1,56 @@
+# Configuration for probot-stale - https://github.com/probot/stale
+
+# Number of days of inactivity before an Issue or Pull Request becomes stale
+daysUntilStale: 60
+
+# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
+# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
+daysUntilClose: false
+
+# Only issues or pull requests with all of these labels are checked if stale. Defaults to `[]` (disabled)
+onlyLabels: []
+
+# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
+exemptLabels:
+  - keepalive
+
+# Set to true to ignore issues in a project (defaults to false)
+exemptProjects: false
+
+# Set to true to ignore issues in a milestone (defaults to false)
+exemptMilestones: false
+
+# Set to true to ignore issues with an assignee (defaults to false)
+exemptAssignees: false
+
+# Label to use when marking as stale
+staleLabel: stale
+
+# Comment to post when marking as stale. Set to `false` to disable
+markComment: false
+
+# Comment to post when removing the stale label.
+# unmarkComment: >
+#   Your comment here.
+
+# Comment to post when closing a stale Issue or Pull Request.
+# closeComment: >
+#   Your comment here.
+
+# Limit the number of actions per hour, from 1-30. Default is 30
+limitPerRun: 30
+
+# Limit to only `issues` or `pulls`
+only: pulls
+
+# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
+# pulls:
+#   daysUntilStale: 30
+#   markComment: >
+#     This pull request has been automatically marked as stale because it has not had
+#     recent activity. It will be closed if no further activity occurs. Thank you
+#     for your contributions.
+
+# issues:
+#   exemptLabels:
+#     - confirmed
.github/workflows/prombench.yml (vendored, 6 lines changed)

@@ -25,7 +25,7 @@ jobs:
 CLUSTER_NAME: prombench
 ZONE: europe-west3-a
 DOMAIN_NAME: prombench.prometheus.io
-TEST_INFRA_REPO: https://github.com/prometheus/prombench.git
+TEST_INFRA_REPO: https://github.com/prometheus/test-infra.git
 GITHUB_ORG: prometheus
 GITHUB_REPO: prometheus
 PR_NUMBER: ${{ github.event.client_payload.PR_NUMBER }}

@@ -82,7 +82,7 @@ jobs:
 PROJECT_ID: macro-mile-203600
 CLUSTER_NAME: prombench
 ZONE: europe-west3-a
-TEST_INFRA_REPO: https://github.com/prometheus/prombench.git
+TEST_INFRA_REPO: https://github.com/prometheus/test-infra.git
 PR_NUMBER: ${{ github.event.client_payload.PR_NUMBER }}
 with:
   args: >-

@@ -137,7 +137,7 @@ jobs:
 CLUSTER_NAME: prombench
 ZONE: europe-west3-a
 DOMAIN_NAME: prombench.prometheus.io
-TEST_INFRA_REPO: https://github.com/prometheus/prombench.git
+TEST_INFRA_REPO: https://github.com/prometheus/test-infra.git
 GITHUB_ORG: prometheus
 GITHUB_REPO: prometheus
 PR_NUMBER: ${{ github.event.client_payload.PR_NUMBER }}
@@ -21,7 +21,8 @@ Release cadence of first pre-releases being cut is 6 weeks.
 | v2.14 | 2019-11-06 | Chris Marchbanks (GitHub: @csmarchbanks) |
 | v2.15 | 2019-12-18 | Bartek Plotka (GitHub: @bwplotka) |
 | v2.16 | 2020-01-29 | Callum Styan (GitHub: @cstyan) |
-| v2.17 | 2020-03-11 | **searching for volunteer** |
+| v2.17 | 2020-03-11 | Julien Pivotto (GitHub: @roidelapluie) |
+| v2.18 | 2020-04-22 | **searching for volunteer** |
 
 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
@@ -17,6 +17,7 @@ package main
 import (
     "context"
     "fmt"
+    "math"
     "net"
     "net/http"
     _ "net/http/pprof" // Comment this line to disable pprof endpoint.

@@ -31,6 +32,7 @@ import (
     "syscall"
     "time"
 
+    "github.com/alecthomas/units"
     "github.com/go-kit/kit/log"
     "github.com/go-kit/kit/log/level"
     conntrack "github.com/mwitkow/go-conntrack"

@@ -48,6 +50,7 @@ import (
     "github.com/prometheus/prometheus/discovery"
     sd_config "github.com/prometheus/prometheus/discovery/config"
     "github.com/prometheus/prometheus/notifier"
+    "github.com/prometheus/prometheus/pkg/labels"
     "github.com/prometheus/prometheus/pkg/logging"
     "github.com/prometheus/prometheus/pkg/relabel"
     prom_runtime "github.com/prometheus/prometheus/pkg/runtime"

@@ -56,7 +59,7 @@ import (
     "github.com/prometheus/prometheus/scrape"
     "github.com/prometheus/prometheus/storage"
     "github.com/prometheus/prometheus/storage/remote"
-    "github.com/prometheus/prometheus/storage/tsdb"
+    "github.com/prometheus/prometheus/tsdb"
     "github.com/prometheus/prometheus/util/strutil"
     "github.com/prometheus/prometheus/web"
 )

@@ -106,7 +109,7 @@ func main() {
     outageTolerance model.Duration
     resendDelay     model.Duration
     web             web.Options
-    tsdb            tsdb.Options
+    tsdb            tsdbOptions
     lookbackDelta   model.Duration
     webTimeout      model.Duration
     queryTimeout    model.Duration

@@ -334,7 +337,7 @@ func main() {
     level.Info(logger).Log("vm_limits", prom_runtime.VmLimits())
 
     var (
-        localStorage  = &tsdb.ReadyStorage{}
+        localStorage  = &readyStorage{}
         remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, cfg.localStoragePath, time.Duration(cfg.RemoteFlushDeadline))
         fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage)
     )

@@ -381,12 +384,13 @@ func main() {
 
     cfg.web.Context = ctxWeb
     cfg.web.TSDB = localStorage.Get
+    cfg.web.TSDBRetentionDuration = cfg.tsdb.RetentionDuration
+    cfg.web.TSDBMaxBytes = cfg.tsdb.MaxBytes
     cfg.web.Storage = fanoutStorage
     cfg.web.QueryEngine = queryEngine
     cfg.web.ScrapeManager = scrapeManager
     cfg.web.RuleManager = ruleManager
     cfg.web.Notifier = notifierManager
-    cfg.web.TSDBCfg = cfg.tsdb
     cfg.web.LookbackDelta = time.Duration(cfg.lookbackDelta)
 
     cfg.web.Version = &web.PrometheusVersion{

@@ -656,6 +660,7 @@ func main() {
     }
     {
         // TSDB.
+        opts := cfg.tsdb.ToTSDBOptions()
         cancel := make(chan struct{})
         g.Add(
             func() error {

@@ -665,15 +670,16 @@ func main() {
                     return errors.New("flag 'storage.tsdb.wal-segment-size' must be set between 10MB and 256MB")
                 }
             }
-            db, err := tsdb.Open(
+            db, err := openDBWithMetrics(
                 cfg.localStoragePath,
-                log.With(logger, "component", "tsdb"),
+                logger,
                 prometheus.DefaultRegisterer,
-                &cfg.tsdb,
+                &opts,
             )
             if err != nil {
                 return errors.Wrapf(err, "opening storage failed")
             }
 
             level.Info(logger).Log("fs_type", prom_runtime.Statfs(cfg.localStoragePath))
             level.Info(logger).Log("msg", "TSDB started")
             level.Debug(logger).Log("msg", "TSDB options",

@@ -744,6 +750,40 @@ func main() {
     level.Info(logger).Log("msg", "See you next time!")
 }
 
+func openDBWithMetrics(dir string, logger log.Logger, reg prometheus.Registerer, opts *tsdb.Options) (*tsdb.DB, error) {
+    db, err := tsdb.Open(
+        dir,
+        log.With(logger, "component", "tsdb"),
+        reg,
+        opts,
+    )
+    if err != nil {
+        return nil, err
+    }
+
+    reg.MustRegister(
+        prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+            Name: "prometheus_tsdb_lowest_timestamp_seconds",
+            Help: "Lowest timestamp value stored in the database.",
+        }, func() float64 {
+            bb := db.Blocks()
+            if len(bb) == 0 {
+                return float64(db.Head().MinTime() / 1000)
+            }
+            return float64(db.Blocks()[0].Meta().MinTime / 1000)
+        }), prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+            Name: "prometheus_tsdb_head_min_time_seconds",
+            Help: "Minimum time bound of the head block.",
+        }, func() float64 { return float64(db.Head().MinTime() / 1000) }),
+        prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+            Name: "prometheus_tsdb_head_max_time_seconds",
+            Help: "Maximum timestamp of the head block.",
+        }, func() float64 { return float64(db.Head().MaxTime() / 1000) }),
+    )
+
+    return db, nil
+}
+
 func reloadConfig(filename string, logger log.Logger, rls ...func(*config.Config) error) (err error) {
     level.Info(logger).Log("msg", "Loading configuration file", "filename", filename)

@@ -853,3 +893,116 @@ func sendAlerts(s sender, externalURL string) rules.NotifyFunc {
         }
     }
 }
+
+// readyStorage implements the Storage interface while allowing to set the actual
+// storage at a later point in time.
+type readyStorage struct {
+    mtx             sync.RWMutex
+    db              *tsdb.DB
+    startTimeMargin int64
+}
+
+// Set the storage.
+func (s *readyStorage) Set(db *tsdb.DB, startTimeMargin int64) {
+    s.mtx.Lock()
+    defer s.mtx.Unlock()
+
+    s.db = db
+    s.startTimeMargin = startTimeMargin
+}
+
+// Get the storage.
+func (s *readyStorage) Get() *tsdb.DB {
+    if x := s.get(); x != nil {
+        return x
+    }
+    return nil
+}
+
+func (s *readyStorage) get() *tsdb.DB {
+    s.mtx.RLock()
+    x := s.db
+    s.mtx.RUnlock()
+    return x
+}
+
+// StartTime implements the Storage interface.
+func (s *readyStorage) StartTime() (int64, error) {
+    if x := s.get(); x != nil {
+        var startTime int64
+
+        if len(x.Blocks()) > 0 {
+            startTime = x.Blocks()[0].Meta().MinTime
+        } else {
+            startTime = time.Now().Unix() * 1000
+        }
+        // Add a safety margin as it may take a few minutes for everything to spin up.
+        return startTime + s.startTimeMargin, nil
+    }
+
+    return math.MaxInt64, tsdb.ErrNotReady
+}
+
+// Querier implements the Storage interface.
+func (s *readyStorage) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
+    if x := s.get(); x != nil {
+        return x.Querier(ctx, mint, maxt)
+    }
+    return nil, tsdb.ErrNotReady
+}
+
+// Appender implements the Storage interface.
+func (s *readyStorage) Appender() storage.Appender {
+    if x := s.get(); x != nil {
+        return x.Appender()
+    }
+    return notReadyAppender{}
+}
+
+type notReadyAppender struct{}
+
+func (n notReadyAppender) Add(l labels.Labels, t int64, v float64) (uint64, error) {
+    return 0, tsdb.ErrNotReady
+}
+
+func (n notReadyAppender) AddFast(ref uint64, t int64, v float64) error { return tsdb.ErrNotReady }
+
+func (n notReadyAppender) Commit() error { return tsdb.ErrNotReady }
+
+func (n notReadyAppender) Rollback() error { return tsdb.ErrNotReady }
+
+// Close implements the Storage interface.
+func (s *readyStorage) Close() error {
+    if x := s.Get(); x != nil {
+        return x.Close()
+    }
+    return nil
+}
+
+// tsdbOptions is tsdb.Option version with defined units.
+// This is required as tsdb.Option fields are unit agnostic (time).
+type tsdbOptions struct {
+    WALSegmentSize         units.Base2Bytes
+    RetentionDuration      model.Duration
+    MaxBytes               units.Base2Bytes
+    NoLockfile             bool
+    AllowOverlappingBlocks bool
+    WALCompression         bool
+    StripeSize             int
+    MinBlockDuration       model.Duration
+    MaxBlockDuration       model.Duration
+}
+
+func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
+    return tsdb.Options{
+        WALSegmentSize:         int(opts.WALSegmentSize),
+        RetentionDuration:      int64(time.Duration(opts.RetentionDuration) / time.Millisecond),
+        MaxBytes:               int64(opts.MaxBytes),
+        NoLockfile:             opts.NoLockfile,
+        AllowOverlappingBlocks: opts.AllowOverlappingBlocks,
+        WALCompression:         opts.WALCompression,
+        StripeSize:             opts.StripeSize,
+        MinBlockDuration:       int64(time.Duration(opts.MinBlockDuration) / time.Millisecond),
+        MaxBlockDuration:       int64(time.Duration(opts.MaxBlockDuration) / time.Millisecond),
+    }
+}
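The tsdbOptions/ToTSDBOptions pair above exists only to carry unit-aware flag values (units.Base2Bytes, model.Duration) and convert them into the unit-agnostic integers that tsdb.Options expects (bytes and milliseconds). A minimal, self-contained sketch of that conversion; the "15d" retention value is an arbitrary illustration, not taken from this commit:

package main

import (
    "fmt"
    "time"

    "github.com/prometheus/common/model"
)

func main() {
    // model.Duration parses human-readable spans such as "15d", while
    // tsdb.Options.RetentionDuration is a plain int64 of milliseconds.
    retention, err := model.ParseDuration("15d")
    if err != nil {
        panic(err)
    }
    ms := int64(time.Duration(retention) / time.Millisecond)
    fmt.Println(ms) // 1296000000, i.e. 15 days in milliseconds
}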
@@ -17,6 +17,7 @@ import (
     "context"
     "fmt"
     "io/ioutil"
+    "math"
     "os"
     "os/exec"
     "path/filepath"

@@ -24,6 +25,9 @@ import (
     "testing"
     "time"
 
+    "github.com/go-kit/kit/log"
+    "github.com/prometheus/client_golang/prometheus"
+    "github.com/prometheus/common/model"
     "github.com/prometheus/prometheus/notifier"
     "github.com/prometheus/prometheus/pkg/labels"
     "github.com/prometheus/prometheus/rules"

@@ -232,3 +236,71 @@ func TestWALSegmentSizeBounds(t *testing.T) {
         }
     }
 }
+
+func TestTimeMetrics(t *testing.T) {
+    tmpDir, err := ioutil.TempDir("", "time_metrics_e2e")
+    testutil.Ok(t, err)
+
+    defer func() {
+        testutil.Ok(t, os.RemoveAll(tmpDir))
+    }()
+
+    reg := prometheus.NewRegistry()
+    db, err := openDBWithMetrics(tmpDir, log.NewNopLogger(), reg, nil)
+    testutil.Ok(t, err)
+    defer func() {
+        testutil.Ok(t, db.Close())
+    }()
+
+    // Check initial values.
+    testutil.Equals(t, map[string]float64{
+        "prometheus_tsdb_lowest_timestamp_seconds": float64(math.MaxInt64) / 1000,
+        "prometheus_tsdb_head_min_time_seconds":    float64(math.MaxInt64) / 1000,
+        "prometheus_tsdb_head_max_time_seconds":    float64(math.MinInt64) / 1000,
+    }, getCurrentGaugeValuesFor(t, reg,
+        "prometheus_tsdb_lowest_timestamp_seconds",
+        "prometheus_tsdb_head_min_time_seconds",
+        "prometheus_tsdb_head_max_time_seconds",
+    ))
+
+    app := db.Appender()
+    _, err = app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 1000, 1)
+    testutil.Ok(t, err)
+    _, err = app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 2000, 1)
+    testutil.Ok(t, err)
+    _, err = app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 3000, 1)
+    testutil.Ok(t, err)
+    testutil.Ok(t, app.Commit())
+
+    testutil.Equals(t, map[string]float64{
+        "prometheus_tsdb_lowest_timestamp_seconds": 1.0,
+        "prometheus_tsdb_head_min_time_seconds":    1.0,
+        "prometheus_tsdb_head_max_time_seconds":    3.0,
+    }, getCurrentGaugeValuesFor(t, reg,
+        "prometheus_tsdb_lowest_timestamp_seconds",
+        "prometheus_tsdb_head_min_time_seconds",
+        "prometheus_tsdb_head_max_time_seconds",
+    ))
+}
+
+func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames ...string) map[string]float64 {
+    f, err := reg.Gather()
+    testutil.Ok(t, err)
+
+    res := make(map[string]float64, len(metricNames))
+    for _, g := range f {
+        for _, m := range metricNames {
+            if g.GetName() != m {
+                continue
+            }
+
+            testutil.Equals(t, 1, len(g.GetMetric()))
+            if _, ok := res[m]; ok {
+                t.Error("expected only one metric family for", m)
+                t.FailNow()
+            }
+            res[m] = *g.GetMetric()[0].GetGauge().Value
+        }
+    }
+    return res
+}
@@ -200,7 +200,7 @@ func (ls Labels) Has(name string) bool {
     return false
 }
 
-// HasDuplicateLabels returns whether ls has duplicate label names.
+// HasDuplicateLabelNames returns whether ls has duplicate label names.
 // It assumes that the labelset is sorted.
 func (ls Labels) HasDuplicateLabelNames() (string, bool) {
     for i, l := range ls {
@@ -175,3 +175,82 @@ func TestLabels_HasDuplicateLabelNames(t *testing.T) {
         testutil.Equals(t, c.LabelName, l, "test %d: incorrect label name", i)
     }
 }
+
+func TestLabels_WithoutEmpty(t *testing.T) {
+    tests := []struct {
+        input    Labels
+        expected Labels
+    }{
+        {
+            input: Labels{
+                {
+                    Name:  "__name__",
+                    Value: "test",
+                },
+                {
+                    Name: "foo",
+                },
+                {
+                    Name:  "hostname",
+                    Value: "localhost",
+                },
+                {
+                    Name: "bar",
+                },
+                {
+                    Name:  "job",
+                    Value: "check",
+                },
+            },
+            expected: Labels{
+                {
+                    Name:  "__name__",
+                    Value: "test",
+                },
+                {
+                    Name:  "hostname",
+                    Value: "localhost",
+                },
+                {
+                    Name:  "job",
+                    Value: "check",
+                },
+            },
+        },
+        {
+            input: Labels{
+                {
+                    Name:  "__name__",
+                    Value: "test",
+                },
+                {
+                    Name:  "hostname",
+                    Value: "localhost",
+                },
+                {
+                    Name:  "job",
+                    Value: "check",
+                },
+            },
+            expected: Labels{
+                {
+                    Name:  "__name__",
+                    Value: "test",
+                },
+                {
+                    Name:  "hostname",
+                    Value: "localhost",
+                },
+                {
+                    Name:  "job",
+                    Value: "check",
+                },
+            },
+        },
+    }
+
+    for i, test := range tests {
+        got := test.input.WithoutEmpty()
+        testutil.Equals(t, test.expected, got, "unexpected labelset for test case %d", i)
+    }
+}
@@ -68,13 +68,10 @@ func BenchmarkRangeQuery(b *testing.B) {
     numIntervals := 8640 + 10000
 
     for s := 0; s < numIntervals; s++ {
-        a, err := storage.Appender()
-        if err != nil {
-            b.Fatal(err)
-        }
+        a := storage.Appender()
         ts := int64(s * 10000) // 10s interval.
         for i, metric := range metrics {
-            err := a.AddFast(metric, refs[i], ts, float64(s))
+            err := a.AddFast(refs[i], ts, float64(s))
             if err != nil {
                 refs[i], _ = a.Add(metric, ts, float64(s))
             }
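The loop above is the standard cached-reference append pattern: AddFast is tried with a series reference returned by an earlier Add, and on failure Add re-resolves the label set and yields a fresh reference to cache. A minimal sketch of that pattern against the post-change signatures; the appender interface below is abbreviated from this diff, and appendWithRefCache is a hypothetical helper, not part of the commit:

package sketch

import "github.com/prometheus/prometheus/pkg/labels"

// appender is the subset of the appender API this pattern needs; after this
// change AddFast takes only the cached reference, no label set.
type appender interface {
    Add(l labels.Labels, t int64, v float64) (uint64, error)
    AddFast(ref uint64, t int64, v float64) error
}

// appendWithRefCache writes one sample per series, reusing cached refs.
func appendWithRefCache(a appender, metrics []labels.Labels, refs []uint64, ts int64, v float64) {
    for i, metric := range metrics {
        // Fast path first; on any error fall back to Add, which resolves
        // the labels again and returns a reference to cache for next time.
        if err := a.AddFast(refs[i], ts, v); err != nil {
            refs[i], _ = a.Add(metric, ts, v)
        }
    }
}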
@@ -38,15 +38,13 @@ func TestDeriv(t *testing.T) {
     }
     engine := NewEngine(opts)
 
-    a, err := storage.Appender()
-    testutil.Ok(t, err)
+    a := storage.Appender()
 
     metric := labels.FromStrings("__name__", "foo")
     a.Add(metric, 1493712816939, 1.0)
     a.Add(metric, 1493712846939, 1.0)
 
-    err = a.Commit()
-    testutil.Ok(t, err)
+    testutil.Ok(t, a.Commit())
 
     query, err := engine.NewInstantQuery(storage, "deriv(foo[30m])", timestamp.Time(1493712846939))
     testutil.Ok(t, err)
@@ -28,8 +28,7 @@ func TestEvaluations(t *testing.T) {
     test, err := newTestFromFile(t, fn)
     testutil.Ok(t, err)
 
-    err = test.Run()
-    testutil.Ok(t, err)
+    testutil.Ok(t, test.Run())
 
     test.Close()
 }
@@ -412,10 +412,9 @@ func (cmd clearCmd) String() string {
 // is reached, evaluation errors do not terminate execution.
 func (t *Test) Run() error {
     for _, cmd := range t.cmds {
-        err := t.exec(cmd)
         // TODO(fabxc): aggregate command errors, yield diffs for result
         // comparison errors.
-        if err != nil {
+        if err := t.exec(cmd); err != nil {
             return err
         }
     }

@@ -429,10 +428,7 @@ func (t *Test) exec(tc testCommand) error {
         t.clear()
 
     case *loadCmd:
-        app, err := t.storage.Appender()
-        if err != nil {
-            return err
-        }
+        app := t.storage.Appender()
         if err := cmd.append(app); err != nil {
             app.Rollback()
             return err

@@ -447,6 +443,7 @@ func (t *Test) exec(tc testCommand) error {
         if err != nil {
             return err
         }
+        defer q.Close()
         res := q.Exec(t.context)
         if res.Err != nil {
             if cmd.fail {

@@ -454,7 +451,6 @@ func (t *Test) exec(tc testCommand) error {
             }
             return errors.Wrapf(res.Err, "error evaluating query %q (line %d)", cmd.expr, cmd.line)
         }
-        defer q.Close()
         if res.Err == nil && cmd.fail {
             return errors.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
         }

@@ -642,10 +638,7 @@ func (ll *LazyLoader) clear() {
 
 // appendTill appends the defined time series to the storage till the given timestamp (in milliseconds).
 func (ll *LazyLoader) appendTill(ts int64) error {
-    app, err := ll.storage.Appender()
-    if err != nil {
-        return err
-    }
+    app := ll.storage.Appender()
     for h, smpls := range ll.loadCmd.defs {
         m := ll.loadCmd.metrics[h]
         for i, s := range smpls {
@@ -20,6 +20,9 @@ import (
     "strings"
 
     "github.com/pkg/errors"
+
+    "github.com/prometheus/prometheus/tsdb/chunkenc"
+
     "github.com/prometheus/prometheus/pkg/labels"
     "github.com/prometheus/prometheus/promql/parser"
     "github.com/prometheus/prometheus/storage"

@@ -257,7 +260,7 @@ func (ss *StorageSeries) Labels() labels.Labels {
 }
 
 // Iterator returns a new iterator of the data of the series.
-func (ss *StorageSeries) Iterator() storage.SeriesIterator {
+func (ss *StorageSeries) Iterator() chunkenc.Iterator {
     return newStorageSeriesIterator(ss.series)
 }
@@ -588,12 +588,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
             numDuplicates = 0
         )
 
-        app, err := g.opts.Appendable.Appender()
-        if err != nil {
-            level.Warn(g.logger).Log("msg", "creating appender failed", "err", err)
-            return
-        }
+        app := g.opts.Appendable.Appender()
 
         seriesReturned := make(map[string]labels.Labels, len(g.seriesInPreviousEval[i]))
         for _, s := range vector {
             if _, err := app.Add(s.Metric, s.T, s.V); err != nil {

@@ -646,14 +641,10 @@ func (g *Group) cleanupStaleSeries(ts time.Time) {
     if len(g.staleSeries) == 0 {
         return
     }
-    app, err := g.opts.Appendable.Appender()
-    if err != nil {
-        level.Warn(g.logger).Log("msg", "creating appender failed", "err", err)
-        return
-    }
+    app := g.opts.Appendable.Appender()
     for _, s := range g.staleSeries {
         // Rule that produced series no longer configured, mark it stale.
-        _, err = app.Add(s, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
+        _, err := app.Add(s, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
         switch err {
         case nil:
         case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:

@@ -837,11 +828,6 @@ type Manager struct {
     logger log.Logger
 }
 
-// Appendable returns an Appender.
-type Appendable interface {
-    Appender() (storage.Appender, error)
-}
-
 // NotifyFunc sends notifications about a set of alerts generated by the given expression.
 type NotifyFunc func(ctx context.Context, expr string, alerts ...*Alert)

@@ -851,7 +837,7 @@ type ManagerOptions struct {
     QueryFunc  QueryFunc
     NotifyFunc NotifyFunc
     Context    context.Context
-    Appendable Appendable
+    Appendable storage.Appendable
     TSDB       storage.Storage
     Logger     log.Logger
     Registerer prometheus.Registerer
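The rules package above and the scrape package below each previously declared a private Appendable interface whose Appender() returned (storage.Appender, error); this commit consolidates both on storage.Appendable. A sketch of the consolidated contract as implied by these hunks; the package name is illustrative and the Appender interface is abbreviated, with the authoritative definitions living in the storage package:

package sketch

import "github.com/prometheus/prometheus/pkg/labels"

// Appender is abbreviated to the methods this diff touches.
type Appender interface {
    Add(l labels.Labels, t int64, v float64) (uint64, error)
    AddFast(ref uint64, t int64, v float64) error
    Commit() error
    Rollback() error
}

// Appendable replaces the per-package interfaces deleted above. Appender()
// can no longer fail, which is why callers such as Group.Eval and
// cleanupStaleSeries drop their "creating appender failed" branches.
type Appendable interface {
    Appender() Appender
}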
@@ -541,7 +541,7 @@ func TestStaleness(t *testing.T) {
     })
 
     // A time series that has two samples and then goes stale.
-    app, _ := storage.Appender()
+    app := storage.Appender()
     app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 0, 1)
     app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 1000, 2)
     app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 2000, math.Float64frombits(value.StaleNaN))

@@ -869,7 +869,7 @@ func TestNotify(t *testing.T) {
         Opts: opts,
     })
 
-    app, _ := storage.Appender()
+    app := storage.Appender()
     app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 1000, 2)
     app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 2000, 3)
     app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 5000, 3)
@@ -20,16 +20,16 @@ import (
 
 type nopAppendable struct{}
 
-func (a nopAppendable) Appender() (storage.Appender, error) {
-    return nopAppender{}, nil
+func (a nopAppendable) Appender() storage.Appender {
+    return nopAppender{}
 }
 
 type nopAppender struct{}
 
 func (a nopAppender) Add(labels.Labels, int64, float64) (uint64, error) { return 0, nil }
-func (a nopAppender) AddFast(labels.Labels, uint64, int64, float64) error { return nil }
+func (a nopAppender) AddFast(uint64, int64, float64) error { return nil }
 func (a nopAppender) Commit() error   { return nil }
 func (a nopAppender) Rollback() error { return nil }
 
 type sample struct {
     metric labels.Labels

@@ -42,18 +42,21 @@ type sample struct {
 type collectResultAppender struct {
     next   storage.Appender
     result []sample
+
+    mapper map[uint64]labels.Labels
 }
 
-func (a *collectResultAppender) AddFast(m labels.Labels, ref uint64, t int64, v float64) error {
+func (a *collectResultAppender) AddFast(ref uint64, t int64, v float64) error {
     if a.next == nil {
         return storage.ErrNotFound
     }
-    err := a.next.AddFast(m, ref, t, v)
+
+    err := a.next.AddFast(ref, t, v)
     if err != nil {
         return err
     }
     a.result = append(a.result, sample{
-        metric: m,
+        metric: a.mapper[ref],
         t:      t,
         v:      v,
     })

@@ -69,7 +72,17 @@ func (a *collectResultAppender) Add(m labels.Labels, t int64, v float64) (uint64
     if a.next == nil {
         return 0, nil
     }
-    return a.next.Add(m, t, v)
+
+    if a.mapper == nil {
+        a.mapper = map[uint64]labels.Labels{}
+    }
+
+    ref, err := a.next.Add(m, t, v)
+    if err != nil {
+        return 0, err
+    }
+    a.mapper[ref] = m
+    return ref, nil
 }
 
 func (a *collectResultAppender) Commit() error { return nil }
@@ -100,13 +100,8 @@ func (mc *MetadataMetricsCollector) Collect(ch chan<- prometheus.Metric) {
     }
 }
 
-// Appendable returns an Appender.
-type Appendable interface {
-    Appender() (storage.Appender, error)
-}
-
 // NewManager is the Manager constructor
-func NewManager(logger log.Logger, app Appendable) *Manager {
+func NewManager(logger log.Logger, app storage.Appendable) *Manager {
     if logger == nil {
         logger = log.NewNopLogger()
     }

@@ -127,7 +122,7 @@ func NewManager(logger log.Logger, app Appendable) *Manager {
 // when receiving new target groups from the discovery manager.
 type Manager struct {
     logger    log.Logger
-    append    Appendable
+    append    storage.Appendable
     graceShut chan struct{}
 
     jitterSeed uint64 // Global jitterSeed seed is used to spread scrape workload across HA setup.
@@ -156,7 +156,7 @@ func init() {
 
 // scrapePool manages scrapes for sets of targets.
 type scrapePool struct {
-    appendable Appendable
+    appendable storage.Appendable
     logger     log.Logger
 
     mtx sync.RWMutex

@@ -187,7 +187,7 @@ const maxAheadTime = 10 * time.Minute
 
 type labelsMutator func(labels.Labels) labels.Labels
 
-func newScrapePool(cfg *config.ScrapeConfig, app Appendable, jitterSeed uint64, logger log.Logger) (*scrapePool, error) {
+func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger) (*scrapePool, error) {
     targetScrapePools.Inc()
     if logger == nil {
         logger = log.NewNopLogger()

@@ -228,13 +228,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app Appendable, jitterSeed uint64,
             return mutateSampleLabels(l, opts.target, opts.honorLabels, opts.mrc)
         },
         func(l labels.Labels) labels.Labels { return mutateReportSampleLabels(l, opts.target) },
-        func() storage.Appender {
-            app, err := app.Appender()
-            if err != nil {
-                panic(err)
-            }
-            return appender(app, opts.limit)
-        },
+        func() storage.Appender { return appender(app.Appender(), opts.limit) },
         cache,
         jitterSeed,
         opts.honorTimestamps,

@@ -1112,7 +1106,7 @@ loop:
         }
         ce, ok := sl.cache.get(yoloString(met))
         if ok {
-            switch err = app.AddFast(ce.lset, ce.ref, t, v); err {
+            switch err = app.AddFast(ce.ref, t, v); err {
             case nil:
                 if tp == nil {
                     sl.cache.trackStaleness(ce.hash, ce.lset)

@@ -1323,7 +1317,7 @@ func (sl *scrapeLoop) reportStale(start time.Time) error {
 func (sl *scrapeLoop) addReportSample(app storage.Appender, s string, t int64, v float64) error {
     ce, ok := sl.cache.get(s)
     if ok {
-        err := app.AddFast(ce.lset, ce.ref, t, v)
+        err := app.AddFast(ce.ref, t, v)
         switch err {
         case nil:
             return nil
@@ -644,8 +644,7 @@ func TestScrapeLoopSeriesAdded(t *testing.T) {
     s := teststorage.New(t)
     defer s.Close()
 
-    app, err := s.Appender()
-    testutil.Ok(t, err)
+    app := s.Appender()
 
     ctx, cancel := context.WithCancel(context.Background())
     sl := newScrapeLoop(ctx,

@@ -788,8 +787,7 @@ func TestScrapeLoopCache(t *testing.T) {
     s := teststorage.New(t)
     defer s.Close()
 
-    sapp, err := s.Appender()
-    testutil.Ok(t, err)
+    sapp := s.Appender()
 
     appender := &collectResultAppender{next: sapp}
     var (

@@ -866,8 +864,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
     s := teststorage.New(t)
     defer s.Close()
 
-    sapp, err := s.Appender()
-    testutil.Ok(t, err)
+    sapp := s.Appender()
 
     appender := &collectResultAppender{next: sapp}
     var (

@@ -1092,8 +1089,7 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) {
     s := teststorage.New(t)
     defer s.Close()
 
-    app, err := s.Appender()
-    testutil.Ok(t, err)
+    app := s.Appender()
 
     capp := &collectResultAppender{next: app}
 

@@ -1108,7 +1104,7 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) {
     )
 
     now := time.Now()
-    _, _, _, err = sl.append([]byte(`metric_a{a="1",b="1"} 1`), "", now)
+    _, _, _, err := sl.append([]byte(`metric_a{a="1",b="1"} 1`), "", now)
     testutil.Ok(t, err)
 
     _, _, _, err = sl.append([]byte(`metric_a{b="1",a="1"} 2`), "", now.Add(time.Minute))

@@ -1273,8 +1269,8 @@ func (app *errorAppender) Add(lset labels.Labels, t int64, v float64) (uint64, e
     }
 }
 
-func (app *errorAppender) AddFast(lset labels.Labels, ref uint64, t int64, v float64) error {
-    return app.collectResultAppender.AddFast(lset, ref, t, v)
+func (app *errorAppender) AddFast(ref uint64, t int64, v float64) error {
+    return app.collectResultAppender.AddFast(ref, t, v)
 }
 
 func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T) {

@@ -1498,8 +1494,7 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) {
     s := teststorage.New(t)
     defer s.Close()
 
-    app, err := s.Appender()
-    testutil.Ok(t, err)
+    app := s.Appender()
 
     capp := &collectResultAppender{next: app}
 

@@ -1513,7 +1508,7 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) {
     )
 
     now := time.Now()
-    _, _, _, err = sl.append([]byte(`metric_a{a="1",b="1"} 1 0`), "", now)
+    _, _, _, err := sl.append([]byte(`metric_a{a="1",b="1"} 1 0`), "", now)
     testutil.Ok(t, err)
 
     want := []sample{

@@ -1530,8 +1525,7 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
     s := teststorage.New(t)
     defer s.Close()
 
-    app, err := s.Appender()
-    testutil.Ok(t, err)
+    app := s.Appender()
 
     capp := &collectResultAppender{next: app}
 

@@ -1545,7 +1539,7 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
     )
 
     now := time.Now()
-    _, _, _, err = sl.append([]byte(`metric_a{a="1",b="1"} 1 0`), "", now)
+    _, _, _, err := sl.append([]byte(`metric_a{a="1",b="1"} 1 0`), "", now)
     testutil.Ok(t, err)
 
     want := []sample{

@@ -1562,8 +1556,7 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
     s := teststorage.New(t)
     defer s.Close()
 
-    app, err := s.Appender()
-    testutil.Ok(t, err)
+    app := s.Appender()
 
     ctx, cancel := context.WithCancel(context.Background())
     sl := newScrapeLoop(ctx,

@@ -1579,7 +1572,7 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
     defer cancel()
 
     // We add a good and a bad metric to check that both are discarded.
-    _, _, _, err = sl.append([]byte("test_metric{le=\"500\"} 1\ntest_metric{le=\"600\",le=\"700\"} 1\n"), "", time.Time{})
+    _, _, _, err := sl.append([]byte("test_metric{le=\"500\"} 1\ntest_metric{le=\"600\",le=\"700\"} 1\n"), "", time.Time{})
     testutil.NotOk(t, err)
 
     q, err := s.Querier(ctx, time.Time{}.UnixNano(), 0)
@@ -303,14 +303,14 @@ func (app *limitAppender) Add(lset labels.Labels, t int64, v float64) (uint64, e
     return ref, nil
 }
 
-func (app *limitAppender) AddFast(lset labels.Labels, ref uint64, t int64, v float64) error {
+func (app *limitAppender) AddFast(ref uint64, t int64, v float64) error {
     if !value.IsStaleNaN(v) {
         app.i++
         if app.i > app.limit {
             return errSampleLimit
         }
     }
-    err := app.Appender.AddFast(lset, ref, t, v)
+    err := app.Appender.AddFast(ref, t, v)
     return err
 }
 

@@ -332,11 +332,11 @@ func (app *timeLimitAppender) Add(lset labels.Labels, t int64, v float64) (uint6
     return ref, nil
 }
 
-func (app *timeLimitAppender) AddFast(lset labels.Labels, ref uint64, t int64, v float64) error {
+func (app *timeLimitAppender) AddFast(ref uint64, t int64, v float64) error {
     if t > app.maxTime {
         return storage.ErrOutOfBounds
     }
-    err := app.Appender.AddFast(lset, ref, t, v)
+    err := app.Appender.AddFast(ref, t, v)
     return err
 }
@@ -15,11 +15,13 @@ package storage
 
 import (
     "math"
+
+    "github.com/prometheus/prometheus/tsdb/chunkenc"
 )
 
 // BufferedSeriesIterator wraps an iterator with a look-back buffer.
 type BufferedSeriesIterator struct {
-    it    SeriesIterator
+    it    chunkenc.Iterator
     buf   *sampleRing
     delta int64
 

@@ -31,12 +33,12 @@ type BufferedSeriesIterator struct {
 // of the current element and the duration of delta before, initialized with an
 // empty iterator. Use Reset() to set an actual iterator to be buffered.
 func NewBuffer(delta int64) *BufferedSeriesIterator {
-    return NewBufferIterator(&NoopSeriesIt, delta)
+    return NewBufferIterator(chunkenc.NewNopIterator(), delta)
 }
 
 // NewBufferIterator returns a new iterator that buffers the values within the
 // time range of the current element and the duration of delta before.
-func NewBufferIterator(it SeriesIterator, delta int64) *BufferedSeriesIterator {
+func NewBufferIterator(it chunkenc.Iterator, delta int64) *BufferedSeriesIterator {
     bit := &BufferedSeriesIterator{
         buf:   newSampleRing(delta, 16),
         delta: delta,

@@ -48,7 +50,7 @@ func NewBufferIterator(it SeriesIterator, delta int64) *BufferedSeriesIterator {
 
 // Reset re-uses the buffer with a new iterator, resetting the buffered time
 // delta to its original value.
-func (b *BufferedSeriesIterator) Reset(it SeriesIterator) {
+func (b *BufferedSeriesIterator) Reset(it chunkenc.Iterator) {
     b.it = it
     b.lastTime = math.MinInt64
     b.ok = true

@@ -70,7 +72,7 @@ func (b *BufferedSeriesIterator) PeekBack(n int) (t int64, v float64, ok bool) {
 
 // Buffer returns an iterator over the buffered data. Invalidates previously
 // returned iterators.
-func (b *BufferedSeriesIterator) Buffer() SeriesIterator {
+func (b *BufferedSeriesIterator) Buffer() chunkenc.Iterator {
     return b.buf.iterator()
 }
 

@@ -159,7 +161,7 @@ func (r *sampleRing) reset() {
 }
 
 // Returns the current iterator. Invalidates previously returned iterators.
-func (r *sampleRing) iterator() SeriesIterator {
+func (r *sampleRing) iterator() chunkenc.Iterator {
     r.it.r = r
     r.it.i = -1
     return &r.it
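Every storage.SeriesIterator in the hunks above becomes a chunkenc.Iterator; the two interfaces expose the same Next/Seek/At/Err shape, which is what makes the swap mechanical. A minimal sketch of consuming such an iterator; drainSamples is an illustrative helper, not part of this commit:

package sketch

import "github.com/prometheus/prometheus/tsdb/chunkenc"

// drainSamples walks an iterator to exhaustion, collecting timestamps and
// values, and surfaces any iteration error at the end.
func drainSamples(it chunkenc.Iterator) (ts []int64, vs []float64, err error) {
    for it.Next() {
        t, v := it.At()
        ts = append(ts, t)
        vs = append(vs, v)
    }
    return ts, vs, it.Err()
}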
@@ -19,6 +19,7 @@ import (
     "testing"
 
     "github.com/prometheus/prometheus/pkg/labels"
+    "github.com/prometheus/prometheus/tsdb/chunkenc"
     "github.com/prometheus/prometheus/util/testutil"
 )
 

@@ -190,7 +191,7 @@ func (m *mockSeriesIterator) Err() error { return m.err() }
 
 type mockSeries struct {
     labels   func() labels.Labels
-    iterator func() SeriesIterator
+    iterator func() chunkenc.Iterator
 }
 
 func newMockSeries(lset labels.Labels, samples []sample) Series {

@@ -198,14 +199,14 @@ func newMockSeries(lset labels.Labels, samples []sample) Series {
     labels: func() labels.Labels {
         return lset
     },
-    iterator: func() SeriesIterator {
+    iterator: func() chunkenc.Iterator {
         return newListSeriesIterator(samples)
     },
     }
 }
 
 func (m *mockSeries) Labels() labels.Labels { return m.labels() }
-func (m *mockSeries) Iterator() SeriesIterator { return m.iterator() }
+func (m *mockSeries) Iterator() chunkenc.Iterator { return m.iterator() }
 
 type listSeriesIterator struct {
     list []sample
@@ -24,6 +24,7 @@ import (
     "github.com/pkg/errors"
     "github.com/prometheus/common/model"
     "github.com/prometheus/prometheus/pkg/labels"
+    "github.com/prometheus/prometheus/tsdb/chunkenc"
 )
 
 type fanout struct {

@@ -87,25 +88,17 @@ func (f *fanout) Querier(ctx context.Context, mint, maxt int64) (Querier, error)
     return NewMergeQuerier(primaryQuerier, queriers), nil
 }
 
-func (f *fanout) Appender() (Appender, error) {
-    primary, err := f.primary.Appender()
-    if err != nil {
-        return nil, err
-    }
-
+func (f *fanout) Appender() Appender {
+    primary := f.primary.Appender()
     secondaries := make([]Appender, 0, len(f.secondaries))
     for _, storage := range f.secondaries {
-        appender, err := storage.Appender()
-        if err != nil {
-            return nil, err
-        }
-        secondaries = append(secondaries, appender)
+        secondaries = append(secondaries, storage.Appender())
     }
     return &fanoutAppender{
         logger:      f.logger,
         primary:     primary,
         secondaries: secondaries,
-    }, nil
+    }
 }
 
 // Close closes the storage and all its underlying resources.

@@ -146,13 +139,13 @@ func (f *fanoutAppender) Add(l labels.Labels, t int64, v float64) (uint64, error
     return ref, nil
 }
 
-func (f *fanoutAppender) AddFast(l labels.Labels, ref uint64, t int64, v float64) error {
-    if err := f.primary.AddFast(l, ref, t, v); err != nil {
+func (f *fanoutAppender) AddFast(ref uint64, t int64, v float64) error {
+    if err := f.primary.AddFast(ref, t, v); err != nil {
         return err
     }
 
     for _, appender := range f.secondaries {
-        if _, err := appender.Add(l, t, v); err != nil {
+        if err := appender.AddFast(ref, t, v); err != nil {
             return err
         }
     }

@@ -521,8 +514,8 @@ func (m *mergeSeries) Labels() labels.Labels {
     return m.labels
 }
 
-func (m *mergeSeries) Iterator() SeriesIterator {
-    iterators := make([]SeriesIterator, 0, len(m.series))
+func (m *mergeSeries) Iterator() chunkenc.Iterator {
+    iterators := make([]chunkenc.Iterator, 0, len(m.series))
     for _, s := range m.series {
         iterators = append(iterators, s.Iterator())
     }

@@ -530,11 +523,11 @@ func (m *mergeSeries) Iterator() SeriesIterator {
 }
 
 type mergeIterator struct {
-    iterators []SeriesIterator
+    iterators []chunkenc.Iterator
     h         seriesIteratorHeap
 }
 
-func newMergeIterator(iterators []SeriesIterator) SeriesIterator {
+func newMergeIterator(iterators []chunkenc.Iterator) chunkenc.Iterator {
     return &mergeIterator{
         iterators: iterators,
         h:         nil,

@@ -581,7 +574,7 @@ func (c *mergeIterator) Next() bool {
             break
         }
 
-        iter := heap.Pop(&c.h).(SeriesIterator)
+        iter := heap.Pop(&c.h).(chunkenc.Iterator)
         if iter.Next() {
             heap.Push(&c.h, iter)
         }

@@ -599,7 +592,7 @@ func (c *mergeIterator) Err() error {
     return nil
 }
 
-type seriesIteratorHeap []SeriesIterator
+type seriesIteratorHeap []chunkenc.Iterator
 
 func (h seriesIteratorHeap) Len() int      { return len(h) }
 func (h seriesIteratorHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }

@@ -611,7 +604,7 @@ func (h seriesIteratorHeap) Less(i, j int) bool {
 }
 
 func (h *seriesIteratorHeap) Push(x interface{}) {
-    *h = append(*h, x.(SeriesIterator))
+    *h = append(*h, x.(chunkenc.Iterator))
 }
 
 func (h *seriesIteratorHeap) Pop() interface{} {
@ -15,6 +15,7 @@ package storage

 import (
 	"context"

 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/storage"
@ -33,7 +34,7 @@ func TestSelectSorted(t *testing.T) {

 	priStorage := teststorage.New(t)
 	defer priStorage.Close()
-	app1, _ := priStorage.Appender()
+	app1 := priStorage.Appender()
 	app1.Add(inputLabel, 0, 0)
 	inputTotalSize++
 	app1.Add(inputLabel, 1000, 1)
@ -45,7 +46,7 @@ func TestSelectSorted(t *testing.T) {

 	remoteStorage1 := teststorage.New(t)
 	defer remoteStorage1.Close()
-	app2, _ := remoteStorage1.Appender()
+	app2 := remoteStorage1.Appender()
 	app2.Add(inputLabel, 3000, 3)
 	inputTotalSize++
 	app2.Add(inputLabel, 4000, 4)
@ -58,7 +59,7 @@ func TestSelectSorted(t *testing.T) {
 	remoteStorage2 := teststorage.New(t)
 	defer remoteStorage2.Close()

-	app3, _ := remoteStorage2.Appender()
+	app3 := remoteStorage2.Appender()
 	app3.Add(inputLabel, 6000, 6)
 	inputTotalSize++
 	app3.Add(inputLabel, 7000, 7)
@ -19,6 +19,7 @@ import (
 	"testing"

 	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/util/testutil"
 )

@ -122,24 +123,24 @@ func TestMergeSeriesSet(t *testing.T) {

 func TestMergeIterator(t *testing.T) {
 	for _, tc := range []struct {
-		input    []SeriesIterator
+		input    []chunkenc.Iterator
 		expected []sample
 	}{
 		{
-			input: []SeriesIterator{
+			input: []chunkenc.Iterator{
 				newListSeriesIterator([]sample{{0, 0}, {1, 1}}),
 			},
 			expected: []sample{{0, 0}, {1, 1}},
 		},
 		{
-			input: []SeriesIterator{
+			input: []chunkenc.Iterator{
 				newListSeriesIterator([]sample{{0, 0}, {1, 1}}),
 				newListSeriesIterator([]sample{{2, 2}, {3, 3}}),
 			},
 			expected: []sample{{0, 0}, {1, 1}, {2, 2}, {3, 3}},
 		},
 		{
-			input: []SeriesIterator{
+			input: []chunkenc.Iterator{
 				newListSeriesIterator([]sample{{0, 0}, {3, 3}}),
 				newListSeriesIterator([]sample{{1, 1}, {4, 4}}),
 				newListSeriesIterator([]sample{{2, 2}, {5, 5}}),
@ -147,7 +148,7 @@ func TestMergeIterator(t *testing.T) {
 			expected: []sample{{0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}},
 		},
 		{
-			input: []SeriesIterator{
+			input: []chunkenc.Iterator{
 				newListSeriesIterator([]sample{{0, 0}, {1, 1}}),
 				newListSeriesIterator([]sample{{0, 0}, {2, 2}}),
 				newListSeriesIterator([]sample{{2, 2}, {3, 3}}),
@ -163,19 +164,19 @@ func TestMergeIterator(t *testing.T) {

 func TestMergeIteratorSeek(t *testing.T) {
 	for _, tc := range []struct {
-		input    []SeriesIterator
+		input    []chunkenc.Iterator
 		seek     int64
 		expected []sample
 	}{
 		{
-			input: []SeriesIterator{
+			input: []chunkenc.Iterator{
 				newListSeriesIterator([]sample{{0, 0}, {1, 1}, {2, 2}}),
 			},
 			seek:     1,
 			expected: []sample{{1, 1}, {2, 2}},
 		},
 		{
-			input: []SeriesIterator{
+			input: []chunkenc.Iterator{
 				newListSeriesIterator([]sample{{0, 0}, {1, 1}}),
 				newListSeriesIterator([]sample{{2, 2}, {3, 3}}),
 			},
@ -183,7 +184,7 @@ func TestMergeIteratorSeek(t *testing.T) {
 			expected: []sample{{2, 2}, {3, 3}},
 		},
 		{
-			input: []SeriesIterator{
+			input: []chunkenc.Iterator{
 				newListSeriesIterator([]sample{{0, 0}, {3, 3}}),
 				newListSeriesIterator([]sample{{1, 1}, {4, 4}}),
 				newListSeriesIterator([]sample{{2, 2}, {5, 5}}),
@ -203,7 +204,7 @@ func TestMergeIteratorSeek(t *testing.T) {
 	}
 }

-func drainSamples(iter SeriesIterator) []sample {
+func drainSamples(iter chunkenc.Iterator) []sample {
 	result := []sample{}
 	for iter.Next() {
 		t, v := iter.At()
@ -18,6 +18,9 @@ import (
 	"errors"

 	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	"github.com/prometheus/prometheus/tsdb/tombstones"
 )

 // The errors exposed.
@ -28,17 +31,21 @@ var (
 	ErrOutOfBounds = errors.New("out of bounds")
 )

+// Appendable allows creating appenders.
+type Appendable interface {
+	// Appender returns a new appender for the storage.
+	Appender() Appender
+}
+
 // Storage ingests and manages samples, along with various indexes. All methods
 // are goroutine-safe. Storage implements storage.SampleAppender.
 type Storage interface {
 	Queryable
+	Appendable

 	// StartTime returns the oldest timestamp stored in the storage.
 	StartTime() (int64, error)

-	// Appender returns a new appender against the storage.
-	Appender() (Appender, error)
-
 	// Close closes the storage and all its underlying resources.
 	Close() error
 }
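The new Appendable interface splits the appender constructor out of Storage, so Storage is now just Queryable plus Appendable plus lifecycle methods. A rough sketch of a minimal conforming implementation (a hypothetical no-op type, for illustration only):

	// nopAppendable satisfies Appendable; nopAppender drops every sample.
	type nopAppendable struct{}

	func (nopAppendable) Appender() Appender { return nopAppender{} }

	type nopAppender struct{}

	func (nopAppender) Add(l labels.Labels, t int64, v float64) (uint64, error) { return 0, nil }
	func (nopAppender) AddFast(ref uint64, t int64, v float64) error            { return nil }
	func (nopAppender) Commit() error                                           { return nil }
	func (nopAppender) Rollback() error                                         { return nil }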
@ -49,7 +56,8 @@ type Queryable interface {
 	Querier(ctx context.Context, mint, maxt int64) (Querier, error)
 }

-// Querier provides reading access to time series data.
+// Querier provides querying access over time series data of a fixed
+// time range.
 type Querier interface {
 	// Select returns a set of series that matches the given label matchers.
 	Select(*SelectParams, ...*labels.Matcher) (SeriesSet, Warnings, error)
@ -58,6 +66,7 @@ type Querier interface {
 	SelectSorted(*SelectParams, ...*labels.Matcher) (SeriesSet, Warnings, error)

 	// LabelValues returns all potential values for a label name.
+	// It is not safe to use the strings beyond the lifefime of the querier.
 	LabelValues(name string) ([]string, Warnings, error)

 	// LabelNames returns all the unique label names present in the block in sorted order.
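Since Select and SelectSorted also return Warnings, callers now decide whether to surface or drop them. A minimal hedged sketch of querying a fixed range (the matcher, the queryable variable, and the plain printing are illustrative):

	q, err := queryable.Querier(ctx, mint, maxt)
	if err != nil {
		return err
	}
	defer q.Close()

	set, ws, err := q.Select(nil, labels.MustNewMatcher(labels.MatchEqual, "job", "demo"))
	if err != nil {
		return err
	}
	for _, w := range ws {
		fmt.Println("select warning:", w) // real callers would log or propagate these
	}
	for set.Next() {
		fmt.Println(set.At().Labels())
	}
	return set.Err()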
@ -90,14 +99,28 @@ func (f QueryableFunc) Querier(ctx context.Context, mint, maxt int64) (Querier,
 }

 // Appender provides batched appends against a storage.
+// It must be completed with a call to Commit or Rollback and must not be reused afterwards.
+//
+// Operations on the Appender interface are not goroutine-safe.
 type Appender interface {
+	// Add adds a sample pair for the given series. A reference number is
+	// returned which can be used to add further samples in the same or later
+	// transactions.
+	// Returned reference numbers are ephemeral and may be rejected in calls
+	// to AddFast() at any point. Adding the sample via Add() returns a new
+	// reference number.
+	// If the reference is 0 it must not be used for caching.
 	Add(l labels.Labels, t int64, v float64) (uint64, error)

-	AddFast(l labels.Labels, ref uint64, t int64, v float64) error
+	// AddFast adds a sample pair for the referenced series. It is generally
+	// faster than adding a sample by providing its full label set.
+	AddFast(ref uint64, t int64, v float64) error

 	// Commit submits the collected samples and purges the batch.
 	Commit() error

+	// Rollback rolls back all modifications made in the appender so far.
+	// Appender has to be discarded after rollback.
 	Rollback() error
 }

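The expanded comments pin down the ref contract: refs from Add are ephemeral, a zero ref must not be cached, and a rejected AddFast should fall back to Add. A hedged sketch of the caller pattern these rules imply (appendSample and cachedRef are made-up names):

	func appendSample(app Appender, cachedRef *uint64, lset labels.Labels, t int64, v float64) error {
		if *cachedRef != 0 {
			err := app.AddFast(*cachedRef, t, v)
			if err == nil {
				return nil
			}
			if err != ErrNotFound {
				return err
			}
			// The ref was rejected; re-resolve it through Add below.
		}
		ref, err := app.Add(lset, t, v)
		if err != nil {
			return err
		}
		if ref != 0 { // a zero ref must not be used for caching
			*cachedRef = ref
		}
		return nil
	}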
@ -108,25 +131,36 @@ type SeriesSet interface {
 	Err() error
 }

+var emptySeriesSet = errSeriesSet{}
+
+// EmptySeriesSet returns a series set that's always empty.
+func EmptySeriesSet() SeriesSet {
+	return emptySeriesSet
+}
+
+type errSeriesSet struct {
+	err error
+}
+
+func (s errSeriesSet) Next() bool { return false }
+func (s errSeriesSet) At() Series { return nil }
+func (s errSeriesSet) Err() error { return s.err }
+
 // Series represents a single time series.
 type Series interface {
 	// Labels returns the complete set of labels identifying the series.
 	Labels() labels.Labels

 	// Iterator returns a new iterator of the data of the series.
-	Iterator() SeriesIterator
+	Iterator() chunkenc.Iterator
 }

-// SeriesIterator iterates over the data of a time series.
-type SeriesIterator interface {
-	// Seek advances the iterator forward to the value at or after
-	// the given timestamp.
-	Seek(t int64) bool
-	// At returns the current timestamp/value pair.
-	At() (t int64, v float64)
-	// Next advances the iterator by one.
+// ChunkSeriesSet exposes the chunks and intervals of a series instead of the
+// actual series itself.
+// TODO(bwplotka): Move it to Series liike Iterator that iterates over chunks and avoiding loading all of them at once.
+type ChunkSeriesSet interface {
 	Next() bool
-	// Err returns the current error.
+	At() (labels.Labels, []chunks.Meta, tombstones.Intervals)
 	Err() error
 }
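ChunkSeriesSet keeps the familiar Next/At/Err shape but yields chunk metadata plus deletion intervals rather than a sample iterator. A sketch of the iteration contract (drainChunkSeriesSet is a hypothetical helper, and fmt is assumed imported):

	func drainChunkSeriesSet(css ChunkSeriesSet) error {
		for css.Next() {
			lset, metas, intervals := css.At()
			fmt.Printf("series %s: %d chunks, %d deleted intervals\n", lset, len(metas), len(intervals))
		}
		return css.Err() // inspect the error only once Next has returned false
	}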
@ -14,8 +14,6 @@
 package storage

 import (
-	"math"
-
 	"github.com/prometheus/prometheus/pkg/labels"
 )

@ -53,35 +51,8 @@ func NoopSeriesSet() SeriesSet {
 	return noopSeriesSet{}
 }

-func (noopSeriesSet) Next() bool {
-	return false
-}
-
-func (noopSeriesSet) At() Series {
-	return nil
-}
-
-func (noopSeriesSet) Err() error {
-	return nil
-}
-
-type noopSeriesIterator struct{}
-
-// NoopSeriesIt is a SeriesIterator that does nothing.
-var NoopSeriesIt = noopSeriesIterator{}
-
-func (noopSeriesIterator) At() (int64, float64) {
-	return math.MinInt64, 0
-}
-
-func (noopSeriesIterator) Seek(t int64) bool {
-	return false
-}
-
-func (noopSeriesIterator) Next() bool {
-	return false
-}
-
-func (noopSeriesIterator) Err() error {
-	return nil
-}
+func (noopSeriesSet) Next() bool { return false }
+func (noopSeriesSet) At() Series { return nil }
+func (noopSeriesSet) Err() error { return nil }
@ -221,7 +221,6 @@ func StreamChunkedReadResponses(
 		if len(chks) == 0 {
 			break
 		}
-
 		b, err := proto.Marshal(&prompb.ChunkedReadResponse{
 			ChunkedSeries: []*prompb.ChunkedSeries{
 				{
@ -254,7 +253,7 @@ func StreamChunkedReadResponses(
 }

 // encodeChunks expects iterator to be ready to use (aka iter.Next() called before invoking).
-func encodeChunks(iter storage.SeriesIterator, chks []prompb.Chunk, frameBytesLeft int) ([]prompb.Chunk, error) {
+func encodeChunks(iter chunkenc.Iterator, chks []prompb.Chunk, frameBytesLeft int) ([]prompb.Chunk, error) {
 	const maxSamplesInChunk = 120

 	var (
@ -392,7 +391,7 @@ func (c *concreteSeries) Labels() labels.Labels {
 	return labels.New(c.labels...)
 }

-func (c *concreteSeries) Iterator() storage.SeriesIterator {
+func (c *concreteSeries) Iterator() chunkenc.Iterator {
 	return newConcreteSeriersIterator(c)
 }

@ -402,7 +401,7 @@ type concreteSeriesIterator struct {
 	series *concreteSeries
 }

-func newConcreteSeriersIterator(series *concreteSeries) storage.SeriesIterator {
+func newConcreteSeriersIterator(series *concreteSeries) chunkenc.Iterator {
 	return &concreteSeriesIterator{
 		cur:    -1,
 		series: series,
@ -149,7 +149,7 @@ func (s *Storage) Querier(ctx context.Context, mint, maxt int64) (storage.Querie
 }

 // Appender implements storage.Storage.
-func (s *Storage) Appender() (storage.Appender, error) {
+func (s *Storage) Appender() storage.Appender {
 	return s.rws.Appender()
 }

@ -183,10 +183,10 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
 }

 // Appender implements storage.Storage.
-func (rws *WriteStorage) Appender() (storage.Appender, error) {
+func (rws *WriteStorage) Appender() storage.Appender {
 	return &timestampTracker{
 		writeStorage: rws,
-	}, nil
+	}
 }

 // Close closes the WriteStorage.
@ -206,7 +206,7 @@ type timestampTracker struct {
 }

 // Add implements storage.Appender.
-func (t *timestampTracker) Add(_ labels.Labels, ts int64, v float64) (uint64, error) {
+func (t *timestampTracker) Add(_ labels.Labels, ts int64, _ float64) (uint64, error) {
 	t.samples++
 	if ts > t.highestTimestamp {
 		t.highestTimestamp = ts
@ -215,8 +215,8 @@ func (t *timestampTracker) Add(_ labels.Labels, ts int64, v float64) (uint64, er
 }

 // AddFast implements storage.Appender.
-func (t *timestampTracker) AddFast(l labels.Labels, _ uint64, ts int64, v float64) error {
-	_, err := t.Add(l, ts, v)
+func (t *timestampTracker) AddFast(_ uint64, ts int64, v float64) error {
+	_, err := t.Add(nil, ts, v)
 	return err
 }

@ -1,323 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tsdb
-
-import (
-	"context"
-	"sync"
-	"time"
-
-	"github.com/alecthomas/units"
-	"github.com/go-kit/kit/log"
-	"github.com/pkg/errors"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/common/model"
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/storage"
-	"github.com/prometheus/prometheus/tsdb"
-)
-
-// ErrNotReady is returned if the underlying storage is not ready yet.
-var ErrNotReady = errors.New("TSDB not ready")
-
-// ReadyStorage implements the Storage interface while allowing to set the actual
-// storage at a later point in time.
-type ReadyStorage struct {
-	mtx sync.RWMutex
-	a   *adapter
-}
-
-// Set the storage.
-func (s *ReadyStorage) Set(db *tsdb.DB, startTimeMargin int64) {
-	s.mtx.Lock()
-	defer s.mtx.Unlock()
-
-	s.a = &adapter{db: db, startTimeMargin: startTimeMargin}
-}
-
-// Get the storage.
-func (s *ReadyStorage) Get() *tsdb.DB {
-	if x := s.get(); x != nil {
-		return x.db
-	}
-	return nil
-}
-
-func (s *ReadyStorage) get() *adapter {
-	s.mtx.RLock()
-	x := s.a
-	s.mtx.RUnlock()
-	return x
-}
-
-// StartTime implements the Storage interface.
-func (s *ReadyStorage) StartTime() (int64, error) {
-	if x := s.get(); x != nil {
-		return x.StartTime()
-	}
-	return int64(model.Latest), ErrNotReady
-}
-
-// Querier implements the Storage interface.
-func (s *ReadyStorage) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
-	if x := s.get(); x != nil {
-		return x.Querier(ctx, mint, maxt)
-	}
-	return nil, ErrNotReady
-}
-
-// Appender implements the Storage interface.
-func (s *ReadyStorage) Appender() (storage.Appender, error) {
-	if x := s.get(); x != nil {
-		return x.Appender()
-	}
-	return nil, ErrNotReady
-}
-
-// Close implements the Storage interface.
-func (s *ReadyStorage) Close() error {
-	if x := s.Get(); x != nil {
-		return x.Close()
-	}
-	return nil
-}
-
-// Adapter return an adapter as storage.Storage.
-func Adapter(db *tsdb.DB, startTimeMargin int64) storage.Storage {
-	return &adapter{db: db, startTimeMargin: startTimeMargin}
-}
-
-// adapter implements a storage.Storage around TSDB.
-type adapter struct {
-	db              *tsdb.DB
-	startTimeMargin int64
-}
-
-// Options of the DB storage.
-type Options struct {
-	// The timestamp range of head blocks after which they get persisted.
-	// It's the minimum duration of any persisted block.
-	MinBlockDuration model.Duration
-
-	// The maximum timestamp range of compacted blocks.
-	MaxBlockDuration model.Duration
-
-	// The maximum size of each WAL segment file.
-	WALSegmentSize units.Base2Bytes
-
-	// Duration for how long to retain data.
-	RetentionDuration model.Duration
-
-	// Maximum number of bytes to be retained.
-	MaxBytes units.Base2Bytes
-
-	// Disable creation and consideration of lockfile.
-	NoLockfile bool
-
-	// When true it disables the overlapping blocks check.
-	// This in-turn enables vertical compaction and vertical query merge.
-	AllowOverlappingBlocks bool
-
-	// When true records in the WAL will be compressed.
-	WALCompression bool
-}
-
-var (
-	startTime   prometheus.GaugeFunc
-	headMaxTime prometheus.GaugeFunc
-	headMinTime prometheus.GaugeFunc
-)
-
-func registerMetrics(db *tsdb.DB, r prometheus.Registerer) {
-
-	startTime = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
-		Name: "prometheus_tsdb_lowest_timestamp_seconds",
-		Help: "Lowest timestamp value stored in the database.",
-	}, func() float64 {
-		bb := db.Blocks()
-		if len(bb) == 0 {
-			return float64(db.Head().MinTime()) / 1000
-		}
-		return float64(db.Blocks()[0].Meta().MinTime) / 1000
-	})
-	headMinTime = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
-		Name: "prometheus_tsdb_head_min_time_seconds",
-		Help: "Minimum time bound of the head block.",
-	}, func() float64 {
-		return float64(db.Head().MinTime()) / 1000
-	})
-	headMaxTime = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
-		Name: "prometheus_tsdb_head_max_time_seconds",
-		Help: "Maximum timestamp of the head block.",
-	}, func() float64 {
-		return float64(db.Head().MaxTime()) / 1000
-	})
-
-	if r != nil {
-		r.MustRegister(
-			startTime,
-			headMaxTime,
-			headMinTime,
-		)
-	}
-}
-
-// Open returns a new storage backed by a TSDB database that is configured for Prometheus.
-func Open(path string, l log.Logger, r prometheus.Registerer, opts *Options) (*tsdb.DB, error) {
-	if opts.MinBlockDuration > opts.MaxBlockDuration {
-		opts.MaxBlockDuration = opts.MinBlockDuration
-	}
-	// Start with smallest block duration and create exponential buckets until the exceed the
-	// configured maximum block duration.
-	rngs := tsdb.ExponentialBlockRanges(int64(time.Duration(opts.MinBlockDuration).Seconds()*1000), 10, 3)
-
-	for i, v := range rngs {
-		if v > int64(time.Duration(opts.MaxBlockDuration).Seconds()*1000) {
-			rngs = rngs[:i]
-			break
-		}
-	}
-
-	db, err := tsdb.Open(path, l, r, &tsdb.Options{
-		WALSegmentSize:         int(opts.WALSegmentSize),
-		RetentionDuration:      uint64(time.Duration(opts.RetentionDuration).Seconds() * 1000),
-		MaxBytes:               int64(opts.MaxBytes),
-		BlockRanges:            rngs,
-		NoLockfile:             opts.NoLockfile,
-		AllowOverlappingBlocks: opts.AllowOverlappingBlocks,
-		WALCompression:         opts.WALCompression,
-	})
-	if err != nil {
-		return nil, err
-	}
-	registerMetrics(db, r)
-
-	return db, nil
-}
-
-// StartTime implements the Storage interface.
-func (a adapter) StartTime() (int64, error) {
-	var startTime int64
-
-	if len(a.db.Blocks()) > 0 {
-		startTime = a.db.Blocks()[0].Meta().MinTime
-	} else {
-		startTime = time.Now().Unix() * 1000
-	}
-
-	// Add a safety margin as it may take a few minutes for everything to spin up.
-	return startTime + a.startTimeMargin, nil
-}
-
-func (a adapter) Querier(_ context.Context, mint, maxt int64) (storage.Querier, error) {
-	q, err := a.db.Querier(mint, maxt)
-	if err != nil {
-		return nil, err
-	}
-	return querier{q: q}, nil
-}
-
-// Appender returns a new appender against the storage.
-func (a adapter) Appender() (storage.Appender, error) {
-	return appender{a: a.db.Appender()}, nil
-}
-
-// Close closes the storage and all its underlying resources.
-func (a adapter) Close() error {
-	return a.db.Close()
-}
-
-type querier struct {
-	q tsdb.Querier
-}
-
-func (q querier) Select(_ *storage.SelectParams, ms ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
-	set, err := q.q.Select(ms...)
-	if err != nil {
-		return nil, nil, err
-	}
-	return seriesSet{set: set}, nil, nil
-}
-
-func (q querier) SelectSorted(_ *storage.SelectParams, ms ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
-	set, err := q.q.SelectSorted(ms...)
-	if err != nil {
-		return nil, nil, err
-	}
-	return seriesSet{set: set}, nil, nil
-}
-
-func (q querier) LabelValues(name string) ([]string, storage.Warnings, error) {
-	v, err := q.q.LabelValues(name)
-	return v, nil, err
-}
-func (q querier) LabelNames() ([]string, storage.Warnings, error) {
-	v, err := q.q.LabelNames()
-	return v, nil, err
-}
-func (q querier) Close() error { return q.q.Close() }
-
-type seriesSet struct {
-	set tsdb.SeriesSet
-}
-
-func (s seriesSet) Next() bool         { return s.set.Next() }
-func (s seriesSet) Err() error         { return s.set.Err() }
-func (s seriesSet) At() storage.Series { return series{s: s.set.At()} }
-
-type series struct {
-	s tsdb.Series
-}
-
-func (s series) Labels() labels.Labels            { return s.s.Labels() }
-func (s series) Iterator() storage.SeriesIterator { return s.s.Iterator() }
-
-type appender struct {
-	a tsdb.Appender
-}
-
-func (a appender) Add(lset labels.Labels, t int64, v float64) (uint64, error) {
-	ref, err := a.a.Add(lset, t, v)
-
-	switch errors.Cause(err) {
-	case tsdb.ErrNotFound:
-		return 0, storage.ErrNotFound
-	case tsdb.ErrOutOfOrderSample:
-		return 0, storage.ErrOutOfOrderSample
-	case tsdb.ErrAmendSample:
-		return 0, storage.ErrDuplicateSampleForTimestamp
-	case tsdb.ErrOutOfBounds:
-		return 0, storage.ErrOutOfBounds
-	}
-	return ref, err
-}
-
-func (a appender) AddFast(_ labels.Labels, ref uint64, t int64, v float64) error {
-	err := a.a.AddFast(ref, t, v)
-
-	switch errors.Cause(err) {
-	case tsdb.ErrNotFound:
-		return storage.ErrNotFound
-	case tsdb.ErrOutOfOrderSample:
-		return storage.ErrOutOfOrderSample
-	case tsdb.ErrAmendSample:
-		return storage.ErrDuplicateSampleForTimestamp
-	case tsdb.ErrOutOfBounds:
-		return storage.ErrOutOfBounds
-	}
-	return err
-}
-
-func (a appender) Commit() error   { return a.a.Commit() }
-func (a appender) Rollback() error { return a.a.Rollback() }
@ -1,21 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tsdb
-
-// Export the internal variables only for tests.
-var (
-	StartTime   = &startTime
-	HeadMaxTime = &headMaxTime
-	HeadMinTime = &headMinTime
-)
@ -1,64 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tsdb_test
-
-import (
-	"testing"
-
-	dto "github.com/prometheus/client_model/go"
-	"github.com/prometheus/common/model"
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/storage/tsdb"
-	"github.com/prometheus/prometheus/util/teststorage"
-	"github.com/prometheus/prometheus/util/testutil"
-)
-
-func TestMetrics(t *testing.T) {
-	db := teststorage.New(t)
-	defer db.Close()
-
-	metrics := &dto.Metric{}
-	startTime := *tsdb.StartTime
-	headMinTime := *tsdb.HeadMinTime
-	headMaxTime := *tsdb.HeadMaxTime
-
-	// Check initial values.
-	testutil.Ok(t, startTime.Write(metrics))
-	testutil.Equals(t, float64(model.Latest)/1000, metrics.Gauge.GetValue())
-
-	testutil.Ok(t, headMinTime.Write(metrics))
-	testutil.Equals(t, float64(model.Latest)/1000, metrics.Gauge.GetValue())
-
-	testutil.Ok(t, headMaxTime.Write(metrics))
-	testutil.Equals(t, float64(model.Earliest)/1000, metrics.Gauge.GetValue())
-
-	app, err := db.Appender()
-	testutil.Ok(t, err)
-
-	app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 1, 1)
-	app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 2, 1)
-	app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 3, 1)
-	testutil.Ok(t, app.Commit())
-
-	// Check after adding some samples.
-	testutil.Ok(t, startTime.Write(metrics))
-	testutil.Equals(t, 0.001, metrics.Gauge.GetValue())
-
-	testutil.Ok(t, headMinTime.Write(metrics))
-	testutil.Equals(t, 0.001, metrics.Gauge.GetValue())
-
-	testutil.Ok(t, headMaxTime.Write(metrics))
-	testutil.Equals(t, 0.003, metrics.Gauge.GetValue())
-
-}
@ -124,12 +124,6 @@ type BlockReader interface {
 	Meta() BlockMeta
 }

-// Appendable defines an entity to which data can be appended.
-type Appendable interface {
-	// Appender returns a new Appender against an underlying store.
-	Appender() Appender
-}
-
 // BlockMeta provides meta information about a block.
 type BlockMeta struct {
 	// Unique identifier for the block and its contents. Changes on compaction.
@ -28,6 +28,7 @@ import (

 	"github.com/go-kit/kit/log"
 	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb/chunks"
 	"github.com/prometheus/prometheus/tsdb/fileutil"
 	"github.com/prometheus/prometheus/tsdb/tsdbutil"
@ -179,7 +180,7 @@ func TestCorruptedChunk(t *testing.T) {
 			}()

 			series := newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{sample{1, 1}})
-			blockDir := createBlock(t, tmpdir, []Series{series})
+			blockDir := createBlock(t, tmpdir, []storage.Series{series})
 			files, err := sequenceFiles(chunkDir(blockDir))
 			testutil.Ok(t, err)
 			testutil.Assert(t, len(files) > 0, "No chunk created.")
@ -202,8 +203,9 @@ func TestCorruptedChunk(t *testing.T) {
 			querier, err := NewBlockQuerier(b, 0, 1)
 			testutil.Ok(t, err)
 			defer func() { testutil.Ok(t, querier.Close()) }()
-			set, err := querier.Select(labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+			set, ws, err := querier.Select(nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
 			testutil.Ok(t, err)
+			testutil.Equals(t, 0, len(ws))

 			// Check query err.
 			testutil.Equals(t, false, set.Next())
@ -302,7 +304,7 @@ func TestReadIndexFormatV1(t *testing.T) {
 }

 // createBlock creates a block with given set of series and returns its dir.
-func createBlock(tb testing.TB, dir string, series []Series) string {
+func createBlock(tb testing.TB, dir string, series []storage.Series) string {
 	return createBlockFromHead(tb, dir, createHead(tb, series))
 }

@ -319,7 +321,7 @@ func createBlockFromHead(tb testing.TB, dir string, head *Head) string {
 	return filepath.Join(dir, ulid.String())
 }

-func createHead(tb testing.TB, series []Series) *Head {
+func createHead(tb testing.TB, series []storage.Series) *Head {
 	head, err := NewHead(nil, nil, nil, 2*60*60*1000, DefaultStripeSize)
 	testutil.Ok(tb, err)
 	defer head.Close()
@ -352,12 +354,12 @@ const (
 )

 // genSeries generates series with a given number of labels and values.
-func genSeries(totalSeries, labelCount int, mint, maxt int64) []Series {
+func genSeries(totalSeries, labelCount int, mint, maxt int64) []storage.Series {
 	if totalSeries == 0 || labelCount == 0 {
 		return nil
 	}

-	series := make([]Series, totalSeries)
+	series := make([]storage.Series, totalSeries)

 	for i := 0; i < totalSeries; i++ {
 		lbls := make(map[string]string, labelCount)
@ -375,12 +377,12 @@ func genSeries(totalSeries, labelCount int, mint, maxt int64) []Series {
 }

 // populateSeries generates series from given labels, mint and maxt.
-func populateSeries(lbls []map[string]string, mint, maxt int64) []Series {
+func populateSeries(lbls []map[string]string, mint, maxt int64) []storage.Series {
 	if len(lbls) == 0 {
 		return nil
 	}

-	series := make([]Series, 0, len(lbls))
+	series := make([]storage.Series, 0, len(lbls))
 	for _, lbl := range lbls {
 		if len(lbl) == 0 {
 			continue
@ -15,6 +15,7 @@ package chunkenc

 import (
 	"fmt"
+	"math"
 	"sync"

 	"github.com/pkg/errors"
@ -72,10 +73,21 @@ type Appender interface {
 }

 // Iterator is a simple iterator that can only get the next value.
+// Iterator iterates over the samples of a time series.
 type Iterator interface {
-	At() (int64, float64)
-	Err() error
+	// Next advances the iterator by one.
 	Next() bool
+	// Seek advances the iterator forward to the first sample with the timestamp equal or greater than t.
+	// If current sample found by previous `Next` or `Seek` operation already has this property, Seek has no effect.
+	// Seek returns true, if such sample exists, false otherwise.
+	// Iterator is exhausted when the Seek returns false.
+	Seek(t int64) bool
+	// At returns the current timestamp/value pair.
+	// Before the iterator has advanced At behaviour is unspecified.
+	At() (int64, float64)
+	// Err returns the current error. It should be used only after iterator is
+	// exhausted, that is `Next` or `Seek` returns false.
+	Err() error
 }

 // NewNopIterator returns a new chunk iterator that does not hold any data.
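Seek's documented contract (no effect when already positioned, exhaustion on false) leads to the usual read loop: one Seek, then At, then Next. A sketch against the new interface, reusing the test file's pair type (the helper name is made up):

	// samplesFrom returns every sample with timestamp >= start, per the Seek contract.
	func samplesFrom(it Iterator, start int64) ([]pair, error) {
		var res []pair
		if it.Seek(start) {
			t, v := it.At() // Seek already positioned us on the first match
			res = append(res, pair{t: t, v: v})
			for it.Next() {
				t, v := it.At()
				res = append(res, pair{t: t, v: v})
			}
		}
		return res, it.Err() // Err is meaningful once the iterator is exhausted
	}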
@ -85,7 +97,8 @@ func NewNopIterator() Iterator {

 type nopIterator struct{}

-func (nopIterator) At() (int64, float64) { return 0, 0 }
+func (nopIterator) Seek(int64) bool      { return false }
+func (nopIterator) At() (int64, float64) { return math.MinInt64, 0 }
 func (nopIterator) Next() bool           { return false }
 func (nopIterator) Err() error           { return nil }

@ -17,7 +17,6 @@ import (
 	"fmt"
 	"io"
 	"math/rand"
-	"reflect"
 	"testing"

 	"github.com/prometheus/prometheus/util/testutil"
|
@ -35,19 +34,15 @@ func TestChunk(t *testing.T) {
|
||||||
t.Run(fmt.Sprintf("%v", enc), func(t *testing.T) {
|
t.Run(fmt.Sprintf("%v", enc), func(t *testing.T) {
|
||||||
for range make([]struct{}, 1) {
|
for range make([]struct{}, 1) {
|
||||||
c := nc()
|
c := nc()
|
||||||
if err := testChunk(c); err != nil {
|
testChunk(t, c)
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func testChunk(c Chunk) error {
|
func testChunk(t *testing.T, c Chunk) {
|
||||||
app, err := c.Appender()
|
app, err := c.Appender()
|
||||||
if err != nil {
|
testutil.Ok(t, err)
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var exp []pair
|
var exp []pair
|
||||||
var (
|
var (
|
||||||
|
@ -56,7 +51,6 @@ func testChunk(c Chunk) error {
|
||||||
)
|
)
|
||||||
for i := 0; i < 300; i++ {
|
for i := 0; i < 300; i++ {
|
||||||
ts += int64(rand.Intn(10000) + 1)
|
ts += int64(rand.Intn(10000) + 1)
|
||||||
// v = rand.Float64()
|
|
||||||
if i%2 == 0 {
|
if i%2 == 0 {
|
||||||
v += float64(rand.Intn(1000000))
|
v += float64(rand.Intn(1000000))
|
||||||
} else {
|
} else {
|
||||||
|
@ -67,29 +61,52 @@ func testChunk(c Chunk) error {
|
||||||
// appending to a partially filled chunk.
|
// appending to a partially filled chunk.
|
||||||
if i%10 == 0 {
|
if i%10 == 0 {
|
||||||
app, err = c.Appender()
|
app, err = c.Appender()
|
||||||
if err != nil {
|
testutil.Ok(t, err)
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
app.Append(ts, v)
|
app.Append(ts, v)
|
||||||
exp = append(exp, pair{t: ts, v: v})
|
exp = append(exp, pair{t: ts, v: v})
|
||||||
// fmt.Println("appended", len(c.Bytes()), c.Bytes())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
it := c.Iterator(nil)
|
// 1. Expand iterator in simple case.
|
||||||
var res []pair
|
it1 := c.Iterator(nil)
|
||||||
for it.Next() {
|
var res1 []pair
|
||||||
ts, v := it.At()
|
for it1.Next() {
|
||||||
res = append(res, pair{t: ts, v: v})
|
ts, v := it1.At()
|
||||||
|
res1 = append(res1, pair{t: ts, v: v})
|
||||||
}
|
}
|
||||||
if it.Err() != nil {
|
testutil.Ok(t, it1.Err())
|
||||||
return it.Err()
|
testutil.Equals(t, exp, res1)
|
||||||
|
|
||||||
|
// 2. Expand second iterator while reusing first one.
|
||||||
|
it2 := c.Iterator(it1)
|
||||||
|
var res2 []pair
|
||||||
|
for it2.Next() {
|
||||||
|
ts, v := it2.At()
|
||||||
|
res2 = append(res2, pair{t: ts, v: v})
|
||||||
}
|
}
|
||||||
if !reflect.DeepEqual(exp, res) {
|
testutil.Ok(t, it2.Err())
|
||||||
return fmt.Errorf("unexpected result\n\ngot: %v\n\nexp: %v", res, exp)
|
testutil.Equals(t, exp, res2)
|
||||||
|
|
||||||
|
// 3. Test iterator Seek.
|
||||||
|
mid := len(exp) / 2
|
||||||
|
|
||||||
|
it3 := c.Iterator(nil)
|
||||||
|
var res3 []pair
|
||||||
|
testutil.Equals(t, true, it3.Seek(exp[mid].t))
|
||||||
|
// Below ones should not matter.
|
||||||
|
testutil.Equals(t, true, it3.Seek(exp[mid].t))
|
||||||
|
testutil.Equals(t, true, it3.Seek(exp[mid].t))
|
||||||
|
ts, v = it3.At()
|
||||||
|
res3 = append(res3, pair{t: ts, v: v})
|
||||||
|
|
||||||
|
for it3.Next() {
|
||||||
|
ts, v := it3.At()
|
||||||
|
res3 = append(res3, pair{t: ts, v: v})
|
||||||
}
|
}
|
||||||
return nil
|
testutil.Ok(t, it3.Err())
|
||||||
|
testutil.Equals(t, exp[mid:], res3)
|
||||||
|
testutil.Equals(t, false, it3.Seek(exp[len(exp)-1].t+1))
|
||||||
}
|
}
|
||||||
|
|
||||||
func benchmarkIterator(b *testing.B, newChunk func() Chunk) {
|
func benchmarkIterator(b *testing.B, newChunk func() Chunk) {
|
||||||
|
|
|
@ -253,6 +253,19 @@ type xorIterator struct {
 	err error
 }

+func (it *xorIterator) Seek(t int64) bool {
+	if it.err != nil {
+		return false
+	}
+
+	for t > it.t || it.numRead == 0 {
+		if !it.Next() {
+			return false
+		}
+	}
+	return true
+}
+
 func (it *xorIterator) At() (int64, float64) {
 	return it.t, it.val
 }

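The XOR iterator's Seek simply drives Next until the timestamp condition holds, so seeking mid-chunk costs a linear scan. A quick hedged usage sketch with a XORChunk (fmt assumed imported; the output comments are what the contract implies):

	c := NewXORChunk()
	app, err := c.Appender()
	if err != nil {
		panic(err)
	}
	app.Append(1, 1.0)
	app.Append(5, 2.0)
	app.Append(9, 3.0)

	it := c.Iterator(nil)
	if it.Seek(4) { // positions on the first sample with t >= 4
		t, v := it.At()
		fmt.Println(t, v) // 5 2
	}
	fmt.Println(it.Seek(100)) // false: the iterator is now exhausted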
@ -15,6 +15,7 @@ package main

 import (
 	"bufio"
+	"context"
 	"fmt"
 	"io"
 	"io/ioutil"
@ -36,7 +37,7 @@ import (
 	"github.com/prometheus/prometheus/tsdb"
 	"github.com/prometheus/prometheus/tsdb/chunks"
 	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
-	kingpin "gopkg.in/alecthomas/kingpin.v2"
+	"gopkg.in/alecthomas/kingpin.v2"
 )

 func main() {
|
@ -176,8 +177,8 @@ func (b *writeBenchmark) run() error {
|
||||||
l := log.With(b.logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
|
l := log.With(b.logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
|
||||||
|
|
||||||
st, err := tsdb.Open(dir, l, nil, &tsdb.Options{
|
st, err := tsdb.Open(dir, l, nil, &tsdb.Options{
|
||||||
RetentionDuration: 15 * 24 * 60 * 60 * 1000, // 15 days in milliseconds
|
RetentionDuration: int64(15 * 24 * time.Hour / time.Millisecond),
|
||||||
BlockRanges: tsdb.ExponentialBlockRanges(2*60*60*1000, 5, 3),
|
MinBlockDuration: int64(2 * time.Hour / time.Millisecond),
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -604,8 +605,7 @@ func analyzeBlock(b tsdb.BlockReader, limit int) error {
 }

 func dumpSamples(db *tsdb.DBReadOnly, mint, maxt int64) (err error) {
-	q, err := db.Querier(mint, maxt)
+	q, err := db.Querier(context.TODO(), mint, maxt)
 	if err != nil {
 		return err
 	}
@ -616,11 +616,19 @@ func dumpSamples(db *tsdb.DBReadOnly, mint, maxt int64) (err error) {
 		err = merr.Err()
 	}()

-	ss, err := q.Select(labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
+	ss, ws, err := q.Select(nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
 	if err != nil {
 		return err
 	}

+	if len(ws) > 0 {
+		var merr tsdb_errors.MultiError
+		for _, w := range ws {
+			merr.Add(w)
+		}
+		return merr.Err()
+	}
+
 	for ss.Next() {
 		series := ss.At()
 		labels := series.Labels()
|
@ -30,6 +30,7 @@ import (
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/prometheus/prometheus/pkg/labels"
|
"github.com/prometheus/prometheus/pkg/labels"
|
||||||
|
"github.com/prometheus/prometheus/storage"
|
||||||
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
||||||
"github.com/prometheus/prometheus/tsdb/chunks"
|
"github.com/prometheus/prometheus/tsdb/chunks"
|
||||||
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
|
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
|
||||||
|
@ -649,7 +650,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta,
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
set ChunkSeriesSet
|
set storage.ChunkSeriesSet
|
||||||
symbols index.StringIter
|
symbols index.StringIter
|
||||||
closers = []io.Closer{}
|
closers = []io.Closer{}
|
||||||
overlapping bool
|
overlapping bool
|
||||||
|
@ -916,7 +917,7 @@ func (c *compactionSeriesSet) At() (labels.Labels, []chunks.Meta, tombstones.Int
|
||||||
}
|
}
|
||||||
|
|
||||||
type compactionMerger struct {
|
type compactionMerger struct {
|
||||||
a, b ChunkSeriesSet
|
a, b storage.ChunkSeriesSet
|
||||||
|
|
||||||
aok, bok bool
|
aok, bok bool
|
||||||
l labels.Labels
|
l labels.Labels
|
||||||
|
@ -924,7 +925,7 @@ type compactionMerger struct {
|
||||||
intervals tombstones.Intervals
|
intervals tombstones.Intervals
|
||||||
}
|
}
|
||||||
|
|
||||||
func newCompactionMerger(a, b ChunkSeriesSet) (*compactionMerger, error) {
|
func newCompactionMerger(a, b storage.ChunkSeriesSet) (*compactionMerger, error) {
|
||||||
c := &compactionMerger{
|
c := &compactionMerger{
|
||||||
a: a,
|
a: a,
|
||||||
b: b,
|
b: b,
|
||||||
|
|
|
@ -895,13 +895,13 @@ func BenchmarkCompactionFromHead(b *testing.B) {
|
||||||
// This is needed for unit tests that rely on
|
// This is needed for unit tests that rely on
|
||||||
// checking state before and after a compaction.
|
// checking state before and after a compaction.
|
||||||
func TestDisableAutoCompactions(t *testing.T) {
|
func TestDisableAutoCompactions(t *testing.T) {
|
||||||
db, delete := openTestDB(t, nil)
|
db, closeFn := openTestDB(t, nil, nil)
|
||||||
defer func() {
|
defer func() {
|
||||||
testutil.Ok(t, db.Close())
|
testutil.Ok(t, db.Close())
|
||||||
delete()
|
closeFn()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
blockRange := DefaultOptions.BlockRanges[0]
|
blockRange := db.compactor.(*LeveledCompactor).ranges[0]
|
||||||
label := labels.FromStrings("foo", "bar")
|
label := labels.FromStrings("foo", "bar")
|
||||||
|
|
||||||
// Trigger a compaction to check that it was skipped and
|
// Trigger a compaction to check that it was skipped and
|
||||||
|
@ -971,7 +971,7 @@ func TestCancelCompactions(t *testing.T) {
|
||||||
// Measure the compaction time without interrupting it.
|
// Measure the compaction time without interrupting it.
|
||||||
var timeCompactionUninterrupted time.Duration
|
var timeCompactionUninterrupted time.Duration
|
||||||
{
|
{
|
||||||
db, err := Open(tmpdir, log.NewNopLogger(), nil, &Options{BlockRanges: []int64{1, 2000}})
|
db, err := open(tmpdir, log.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000})
|
||||||
testutil.Ok(t, err)
|
testutil.Ok(t, err)
|
||||||
testutil.Equals(t, 3, len(db.Blocks()), "initial block count mismatch")
|
testutil.Equals(t, 3, len(db.Blocks()), "initial block count mismatch")
|
||||||
testutil.Equals(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "initial compaction counter mismatch")
|
testutil.Equals(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "initial compaction counter mismatch")
|
||||||
|
@ -991,7 +991,7 @@ func TestCancelCompactions(t *testing.T) {
|
||||||
}
|
}
|
||||||
// Measure the compaction time when closing the db in the middle of compaction.
|
// Measure the compaction time when closing the db in the middle of compaction.
|
||||||
{
|
{
|
||||||
db, err := Open(tmpdirCopy, log.NewNopLogger(), nil, &Options{BlockRanges: []int64{1, 2000}})
|
db, err := open(tmpdirCopy, log.NewNopLogger(), nil, DefaultOptions(), []int64{1, 2000})
|
||||||
testutil.Ok(t, err)
|
testutil.Ok(t, err)
|
||||||
testutil.Equals(t, 3, len(db.Blocks()), "initial block count mismatch")
|
testutil.Equals(t, 3, len(db.Blocks()), "initial block count mismatch")
|
||||||
testutil.Equals(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "initial compaction counter mismatch")
|
testutil.Equals(t, 0.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.ran), "initial compaction counter mismatch")
|
||||||
|
@ -1017,10 +1017,9 @@ func TestCancelCompactions(t *testing.T) {
|
||||||
// TestDeleteCompactionBlockAfterFailedReload ensures that a failed reload immediately after a compaction
|
// TestDeleteCompactionBlockAfterFailedReload ensures that a failed reload immediately after a compaction
|
||||||
// deletes the resulting block to avoid creatings blocks with the same time range.
|
// deletes the resulting block to avoid creatings blocks with the same time range.
|
||||||
func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) {
|
func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) {
|
||||||
|
|
||||||
tests := map[string]func(*DB) int{
|
tests := map[string]func(*DB) int{
|
||||||
"Test Head Compaction": func(db *DB) int {
|
"Test Head Compaction": func(db *DB) int {
|
||||||
rangeToTriggerCompaction := db.opts.BlockRanges[0]/2*3 - 1
|
rangeToTriggerCompaction := db.compactor.(*LeveledCompactor).ranges[0]/2*3 - 1
|
||||||
defaultLabel := labels.FromStrings("foo", "bar")
|
defaultLabel := labels.FromStrings("foo", "bar")
|
||||||
|
|
||||||
// Add some data to the head that is enough to trigger a compaction.
|
// Add some data to the head that is enough to trigger a compaction.
|
||||||
|
@ -1053,12 +1052,10 @@ func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) {
|
||||||
|
|
||||||
for title, bootStrap := range tests {
|
for title, bootStrap := range tests {
|
||||||
t.Run(title, func(t *testing.T) {
|
t.Run(title, func(t *testing.T) {
|
||||||
db, delete := openTestDB(t, &Options{
|
db, closeFn := openTestDB(t, nil, []int64{1, 100})
|
||||||
BlockRanges: []int64{1, 100},
|
|
||||||
})
|
|
||||||
defer func() {
|
defer func() {
|
||||||
testutil.Ok(t, db.Close())
|
testutil.Ok(t, db.Close())
|
||||||
delete()
|
closeFn()
|
||||||
}()
|
}()
|
||||||
db.DisableCompactions()
|
db.DisableCompactions()
|
||||||
|
|
||||||
|
|
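A note on the recurring pattern above: with BlockRanges gone from Options, tests now read the effective block ranges back from the compactor through a type assertion. The sketch below is test-style code, valid only inside package tsdb since LeveledCompactor's ranges field is unexported; the names are taken from the diff, the rest is illustrative:

	// Inside a package-tsdb test: recover the configured block ranges from
	// the compactor instead of from Options (sketch, not part of this commit).
	lc, ok := db.compactor.(*LeveledCompactor)
	if !ok {
		t.Fatal("expected the default *LeveledCompactor")
	}
	blockRange := lc.ranges[0] // smallest range; tests size their data off it
	_ = blockRange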
150
tsdb/db.go
@@ -35,6 +35,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
 	"github.com/prometheus/prometheus/tsdb/fileutil"
@@ -43,21 +44,29 @@ import (
 	"golang.org/x/sync/errgroup"
 )
 
-// Default duration of a block in milliseconds - 2h.
 const (
-	DefaultBlockDuration = int64(2 * 60 * 60 * 1000)
+	// Default duration of a block in milliseconds.
+	DefaultBlockDuration = int64(2 * time.Hour / time.Millisecond)
+)
+
+var (
+	// ErrNotReady is returned if the underlying storage is not ready yet.
+	ErrNotReady = errors.New("TSDB not ready")
 )
 
 // DefaultOptions used for the DB. They are sane for setups using
 // millisecond precision timestamps.
-var DefaultOptions = &Options{
-	WALSegmentSize:         wal.DefaultSegmentSize,
-	RetentionDuration:      15 * 24 * 60 * 60 * 1000, // 15 days in milliseconds
-	BlockRanges:            ExponentialBlockRanges(DefaultBlockDuration, 3, 5),
-	NoLockfile:             false,
-	AllowOverlappingBlocks: false,
-	WALCompression:         false,
-	StripeSize:             DefaultStripeSize,
+func DefaultOptions() *Options {
+	return &Options{
+		WALSegmentSize:         wal.DefaultSegmentSize,
+		RetentionDuration:      int64(15 * 24 * time.Hour / time.Millisecond),
+		MinBlockDuration:       DefaultBlockDuration,
+		MaxBlockDuration:       DefaultBlockDuration,
+		NoLockfile:             false,
+		AllowOverlappingBlocks: false,
+		WALCompression:         false,
+		StripeSize:             DefaultStripeSize,
+	}
 }
 
 // Options of the DB storage.
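Because DefaultOptions is now a constructor rather than a shared package variable, each caller receives a fresh *Options it can mutate without affecting anyone else. A minimal sketch of the intended call pattern (the directory name is illustrative):

	package main

	import "github.com/prometheus/prometheus/tsdb"

	func main() {
		opts := tsdb.DefaultOptions() // fresh *Options on every call
		opts.WALCompression = true    // mutating the private copy is now safe
		db, err := tsdb.Open("data", nil, nil, opts)
		if err != nil {
			panic(err)
		}
		defer db.Close()
	}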
@@ -69,7 +78,9 @@ type Options struct {
 	WALSegmentSize int
 
 	// Duration of persisted data to keep.
-	RetentionDuration uint64
+	// Unit agnostic as long as unit is consistent with MinBlockDuration and MaxBlockDuration.
+	// Typically it is in milliseconds.
+	RetentionDuration int64
 
 	// Maximum number of bytes in blocks to be retained.
 	// 0 or less means disabled.
@@ -78,9 +89,6 @@ type Options struct {
 	// the current size of the database.
 	MaxBytes int64
 
-	// The sizes of the Blocks.
-	BlockRanges []int64
-
 	// NoLockfile disables creation and consideration of a lock file.
 	NoLockfile bool
 
@@ -93,31 +101,17 @@ type Options struct {
 
 	// StripeSize is the size in entries of the series hash map. Reducing the size will save memory but impact performance.
 	StripeSize int
-}
 
-// Appender allows appending a batch of data. It must be completed with a
-// call to Commit or Rollback and must not be reused afterwards.
-//
-// Operations on the Appender interface are not goroutine-safe.
-type Appender interface {
-	// Add adds a sample pair for the given series. A reference number is
-	// returned which can be used to add further samples in the same or later
-	// transactions.
-	// Returned reference numbers are ephemeral and may be rejected in calls
-	// to AddFast() at any point. Adding the sample via Add() returns a new
-	// reference number.
-	// If the reference is 0 it must not be used for caching.
-	Add(l labels.Labels, t int64, v float64) (uint64, error)
-
-	// AddFast adds a sample pair for the referenced series. It is generally
-	// faster than adding a sample by providing its full label set.
-	AddFast(ref uint64, t int64, v float64) error
-
-	// Commit submits the collected samples and purges the batch.
-	Commit() error
-
-	// Rollback rolls back all modifications made in the appender so far.
-	Rollback() error
+	// The timestamp range of head blocks after which they get persisted.
+	// It's the minimum duration of any persisted block.
+	// Unit agnostic as long as unit is consistent with RetentionDuration and MaxBlockDuration.
+	// Typically it is in milliseconds.
+	MinBlockDuration int64
+
+	// The maximum timestamp range of compacted blocks.
+	// Unit agnostic as long as unit is consistent with MinBlockDuration and RetentionDuration.
+	// Typically it is in milliseconds.
+	MaxBlockDuration int64
 }
 
 // DB handles reads and writes of time series falling into
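The duration fields are deliberately unit-agnostic int64s; the struct comments only require that RetentionDuration, MinBlockDuration, and MaxBlockDuration share a unit. A small sketch of filling them consistently in milliseconds, mirroring the arithmetic DefaultOptions uses (the 36h value is illustrative):

	package main

	import (
		"fmt"
		"time"
	)

	func ms(d time.Duration) int64 { return int64(d / time.Millisecond) }

	func main() {
		retention := ms(15 * 24 * time.Hour) // 1296000000, as in DefaultOptions
		minBlock := ms(2 * time.Hour)        // 7200000 == DefaultBlockDuration
		maxBlock := ms(36 * time.Hour)       // must use the same unit as the others
		fmt.Println(retention, minBlock, maxBlock)
	}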
@@ -329,7 +323,13 @@ func (db *DBReadOnly) FlushWAL(dir string) error {
 		mint: mint,
 		maxt: maxt,
 	}
-	compactor, err := NewLeveledCompactor(context.Background(), nil, db.logger, DefaultOptions.BlockRanges, chunkenc.NewPool())
+	compactor, err := NewLeveledCompactor(
+		context.Background(),
+		nil,
+		db.logger,
+		ExponentialBlockRanges(DefaultOptions().MinBlockDuration, 3, 5),
+		chunkenc.NewPool(),
+	)
 	if err != nil {
 		return errors.Wrap(err, "create leveled compactor")
 	}
@@ -341,7 +341,7 @@ func (db *DBReadOnly) FlushWAL(dir string) error {
 
 // Querier loads the wal and returns a new querier over the data partition for the given time range.
 // Current implementation doesn't support multiple Queriers.
-func (db *DBReadOnly) Querier(mint, maxt int64) (Querier, error) {
+func (db *DBReadOnly) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
 	select {
 	case <-db.closed:
 		return nil, ErrClosed
@@ -402,7 +402,7 @@ func (db *DBReadOnly) Querier(mint, maxt int64) (Querier, error) {
 		head: head,
 	}
 
-	return dbWritable.Querier(mint, maxt)
+	return dbWritable.Querier(ctx, mint, maxt)
 }
 
 // Blocks returns a slice of block readers for persisted blocks.
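Both querier constructors now take a context and return storage.Querier. A sketch of a read-only query follows; it assumes OpenDBReadOnly elsewhere in this file still has the signature OpenDBReadOnly(dir string, l log.Logger) (*DBReadOnly, error), which this diff does not show:

	package main

	import (
		"context"

		"github.com/prometheus/prometheus/tsdb"
	)

	func main() {
		db, err := tsdb.OpenDBReadOnly("data", nil) // assumed constructor, see note above
		if err != nil {
			panic(err)
		}
		defer db.Close()

		q, err := db.Querier(context.TODO(), 0, 1000) // ctx is the new parameter
		if err != nil {
			panic(err)
		}
		defer q.Close()
	}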
@@ -481,20 +481,51 @@ func (db *DBReadOnly) Close() error {
 	return merr.Err()
 }
 
-// Open returns a new DB in the given directory.
+// Open returns a new DB in the given directory. If options are empty, DefaultOptions will be used.
 func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options) (db *DB, err error) {
+	var rngs []int64
+	opts, rngs = validateOpts(opts, nil)
+	return open(dir, l, r, opts, rngs)
+}
+
+func validateOpts(opts *Options, rngs []int64) (*Options, []int64) {
+	if opts == nil {
+		opts = DefaultOptions()
+	}
+	if opts.StripeSize <= 0 {
+		opts.StripeSize = DefaultStripeSize
+	}
+
+	if opts.MinBlockDuration <= 0 {
+		opts.MinBlockDuration = DefaultBlockDuration
+	}
+	if opts.MinBlockDuration > opts.MaxBlockDuration {
+		opts.MaxBlockDuration = opts.MinBlockDuration
+	}
+
+	if len(rngs) == 0 {
+		// Start with smallest block duration and create exponential buckets until they exceed the
+		// configured maximum block duration.
+		rngs = ExponentialBlockRanges(opts.MinBlockDuration, 10, 3)
+	}
+	return opts, rngs
+}
+
+func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs []int64) (db *DB, err error) {
 	if err := os.MkdirAll(dir, 0777); err != nil {
 		return nil, err
 	}
 	if l == nil {
 		l = log.NewNopLogger()
 	}
-	if opts == nil {
-		opts = DefaultOptions
-	}
-	if opts.StripeSize <= 0 {
-		opts.StripeSize = DefaultStripeSize
+
+	for i, v := range rngs {
+		if v > opts.MaxBlockDuration {
+			rngs = rngs[:i]
+			break
+		}
 	}
 
 	// Fixup bad format written by Prometheus 2.1.
 	if err := repairBadIndexVersion(l, dir); err != nil {
 		return nil, err
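validateOpts centralizes the defaulting Open used to do inline: nil options become DefaultOptions(), non-positive StripeSize and MinBlockDuration fall back to their defaults, MaxBlockDuration is clamped up to MinBlockDuration, and an empty range list is derived exponentially. A sketch of the observable rules, written as in-package code since the function is unexported:

	package tsdb

	// Sketch only: the defaulting rules of validateOpts, called as Open does.
	func exampleDefaults() (*Options, []int64) {
		opts, rngs := validateOpts(nil, nil)
		// opts.MinBlockDuration == DefaultBlockDuration (2h in milliseconds)
		// opts.MaxBlockDuration == opts.MinBlockDuration (clamped upward)
		// rngs == ExponentialBlockRanges(opts.MinBlockDuration, 10, 3)
		return opts, rngs
	}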
@@ -535,7 +566,7 @@ func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options) (db
 	}
 
 	ctx, cancel := context.WithCancel(context.Background())
-	db.compactor, err = NewLeveledCompactor(ctx, r, l, opts.BlockRanges, db.chunkPool)
+	db.compactor, err = NewLeveledCompactor(ctx, r, l, rngs, db.chunkPool)
 	if err != nil {
 		cancel()
 		return nil, errors.Wrap(err, "create leveled compactor")
@@ -556,7 +587,7 @@ func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options) (db
 		}
 	}
 
-	db.head, err = NewHead(r, l, wlog, opts.BlockRanges[0], opts.StripeSize)
+	db.head, err = NewHead(r, l, wlog, rngs[0], opts.StripeSize)
 	if err != nil {
 		return nil, err
 	}
@@ -585,6 +616,17 @@ func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options) (db
 	return db, nil
 }
 
+// StartTime implements the Storage interface.
+func (db *DB) StartTime() (int64, error) {
+	db.mtx.RLock()
+	defer db.mtx.RUnlock()
+
+	if len(db.blocks) > 0 {
+		return db.blocks[0].Meta().MinTime, nil
+	}
+	return db.head.MinTime(), nil
+}
+
 // Dir returns the directory of the database.
 func (db *DB) Dir() string {
 	return db.dir
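The new StartTime gives *DB the storage interface's notion of the earliest queryable time: the oldest persisted block wins, otherwise the head's minimum. A sketch of calling it through the public API:

	package main

	import (
		"fmt"

		"github.com/prometheus/prometheus/tsdb"
	)

	func main() {
		db, err := tsdb.Open("data", nil, nil, nil) // nil opts -> DefaultOptions()
		if err != nil {
			panic(err)
		}
		defer db.Close()

		st, err := db.StartTime() // oldest block's MinTime, else head MinTime
		if err != nil {
			panic(err)
		}
		fmt.Println("start time (ms):", st)
	}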
@@ -630,14 +672,14 @@ func (db *DB) run() {
 }
 
 // Appender opens a new appender against the database.
-func (db *DB) Appender() Appender {
+func (db *DB) Appender() storage.Appender {
 	return dbAppender{db: db, Appender: db.head.Appender()}
 }
 
 // dbAppender wraps the DB's head appender and triggers compactions on commit
 // if necessary.
 type dbAppender struct {
-	Appender
+	storage.Appender
 	db *DB
 }
 
@@ -948,7 +990,7 @@ func (db *DB) beyondTimeRetention(blocks []*Block) (deletable map[ulid.ULID]*Blo
 	for i, block := range blocks {
 		// The difference between the first block and this block is larger than
 		// the retention period so any blocks after that are added as deletable.
-		if i > 0 && blocks[0].Meta().MaxTime-block.Meta().MaxTime > int64(db.opts.RetentionDuration) {
+		if i > 0 && blocks[0].Meta().MaxTime-block.Meta().MaxTime > db.opts.RetentionDuration {
 			for _, b := range blocks[i:] {
 				deletable[b.meta.ULID] = b
 			}
@@ -973,7 +1015,7 @@ func (db *DB) beyondSizeRetention(blocks []*Block) (deletable map[ulid.ULID]*Blo
 	blocksSize := walSize
 	for i, block := range blocks {
 		blocksSize += block.Size()
-		if blocksSize > db.opts.MaxBytes {
+		if blocksSize > int64(db.opts.MaxBytes) {
 			// Add this and all following blocks for deletion.
 			for _, b := range blocks[i:] {
 				deletable[b.meta.ULID] = b
@@ -1227,7 +1269,7 @@ func (db *DB) Snapshot(dir string, withHead bool) error {
 
 // Querier returns a new querier over the data partition for the given time range.
 // A goroutine must not handle more than one open Querier.
-func (db *DB) Querier(mint, maxt int64) (Querier, error) {
+func (db *DB) Querier(_ context.Context, mint, maxt int64) (storage.Querier, error) {
 	var blocks []BlockReader
 	var blockMetas []BlockMeta
 
@@ -1248,7 +1290,7 @@ func (db *DB) Querier(mint, maxt int64) (Querier, error) {
 		})
 	}
 
-	blockQueriers := make([]Querier, 0, len(blocks))
+	blockQueriers := make([]storage.Querier, 0, len(blocks))
 	for _, b := range blocks {
 		q, err := NewBlockQuerier(b, mint, maxt)
 		if err == nil {
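Taken together, a query against the rewritten API threads a context, works with storage.Querier, and gets a warnings slice back from Select (the tests below pass nil for the select-params argument). A sketch, with the matcher values illustrative:

	package main

	import (
		"context"
		"fmt"

		"github.com/prometheus/prometheus/pkg/labels"
		"github.com/prometheus/prometheus/tsdb"
	)

	func main() {
		db, err := tsdb.Open("data", nil, nil, nil)
		if err != nil {
			panic(err)
		}
		defer db.Close()

		q, err := db.Querier(context.TODO(), 0, 1000)
		if err != nil {
			panic(err)
		}
		defer q.Close()

		matcher := labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")
		ss, ws, err := q.Select(nil, matcher) // nil select params, as in the tests
		if err != nil {
			panic(err)
		}
		fmt.Println("warnings:", len(ws))
		for ss.Next() {
			fmt.Println(ss.At().Labels())
		}
	}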
305
tsdb/db_test.go
@@ -14,6 +14,7 @@
 package tsdb
 
 import (
+	"context"
 	"encoding/binary"
 	"fmt"
 	"hash/crc32"
@@ -29,15 +30,15 @@ import (
 	"testing"
 	"time"
 
-	"github.com/prometheus/prometheus/tsdb/fileutil"
-
 	"github.com/go-kit/kit/log"
 	"github.com/oklog/ulid"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
 	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb/chunks"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
 	"github.com/prometheus/prometheus/tsdb/index"
 	"github.com/prometheus/prometheus/tsdb/record"
 	"github.com/prometheus/prometheus/tsdb/tombstones"
@@ -46,11 +47,16 @@ import (
 	"github.com/prometheus/prometheus/util/testutil"
 )
 
-func openTestDB(t testing.TB, opts *Options) (db *DB, close func()) {
+func openTestDB(t testing.TB, opts *Options, rngs []int64) (db *DB, close func()) {
 	tmpdir, err := ioutil.TempDir("", "test")
 	testutil.Ok(t, err)
 
-	db, err = Open(tmpdir, nil, nil, opts)
+	if len(rngs) == 0 {
+		db, err = Open(tmpdir, nil, nil, opts)
+	} else {
+		opts, rngs = validateOpts(opts, rngs)
+		db, err = open(tmpdir, nil, nil, opts, rngs)
+	}
 	testutil.Ok(t, err)
 
 	// Do not close the test database by default as it will deadlock on test failures.
@@ -60,12 +66,13 @@ func openTestDB(t testing.TB, opts *Options) (db *DB, close func()) {
 }
 
 // query runs a matcher query against the querier and fully expands its data.
-func query(t testing.TB, q Querier, matchers ...*labels.Matcher) map[string][]tsdbutil.Sample {
-	ss, err := q.Select(matchers...)
+func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[string][]tsdbutil.Sample {
+	ss, ws, err := q.Select(nil, matchers...)
 	defer func() {
 		testutil.Ok(t, q.Close())
 	}()
 	testutil.Ok(t, err)
+	testutil.Equals(t, 0, len(ws))
 
 	result := map[string][]tsdbutil.Sample{}
 
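The reworked openTestDB helper picks one of two construction paths depending on whether explicit block ranges are given. A sketch of both, written as an in-package test (not part of this commit):

	package tsdb

	import (
		"testing"

		"github.com/prometheus/prometheus/util/testutil"
	)

	// Sketch only: the two paths through the new openTestDB helper.
	func TestOpenTestDBPaths(t *testing.T) {
		db, closeFn := openTestDB(t, nil, nil) // no ranges -> exported Open
		testutil.Ok(t, db.Close())
		closeFn()

		db, closeFn = openTestDB(t, nil, []int64{1, 100}) // ranges -> validateOpts + open
		testutil.Ok(t, db.Close())
		closeFn()
	}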
@@ -91,10 +98,10 @@ func query(t testing.TB, q storage.Querier, matchers ...*labels.Matcher) map[string][]tsdbutil.Sample {
 // Ensure that blocks are held in memory in their time order
 // and not in ULID order as they are read from the directory.
 func TestDB_reloadOrder(t *testing.T) {
-	db, delete := openTestDB(t, nil)
+	db, closeFn := openTestDB(t, nil, nil)
 	defer func() {
 		testutil.Ok(t, db.Close())
-		delete()
+		closeFn()
 	}()
 
 	metas := []BlockMeta{
@@ -118,10 +125,10 @@ func TestDB_reloadOrder(t *testing.T) {
 }
 
 func TestDataAvailableOnlyAfterCommit(t *testing.T) {
-	db, delete := openTestDB(t, nil)
+	db, closeFn := openTestDB(t, nil, nil)
 	defer func() {
 		testutil.Ok(t, db.Close())
-		delete()
+		closeFn()
 	}()
 
 	app := db.Appender()
@@ -129,7 +136,7 @@ func TestDataAvailableOnlyAfterCommit(t *testing.T) {
 	_, err := app.Add(labels.FromStrings("foo", "bar"), 0, 0)
 	testutil.Ok(t, err)
 
-	querier, err := db.Querier(0, 1)
+	querier, err := db.Querier(context.TODO(), 0, 1)
 	testutil.Ok(t, err)
 	seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
 	testutil.Equals(t, map[string][]tsdbutil.Sample{}, seriesSet)
@@ -137,7 +144,7 @@ func TestDataAvailableOnlyAfterCommit(t *testing.T) {
 	err = app.Commit()
 	testutil.Ok(t, err)
 
-	querier, err = db.Querier(0, 1)
+	querier, err = db.Querier(context.TODO(), 0, 1)
 	testutil.Ok(t, err)
 	defer querier.Close()
 
@@ -147,10 +154,10 @@ func TestDataAvailableOnlyAfterCommit(t *testing.T) {
 }
 
 func TestDataNotAvailableAfterRollback(t *testing.T) {
-	db, delete := openTestDB(t, nil)
+	db, closeFn := openTestDB(t, nil, nil)
 	defer func() {
 		testutil.Ok(t, db.Close())
-		delete()
+		closeFn()
 	}()
 
 	app := db.Appender()
@@ -160,7 +167,7 @@ func TestDataNotAvailableAfterRollback(t *testing.T) {
 	err = app.Rollback()
 	testutil.Ok(t, err)
 
-	querier, err := db.Querier(0, 1)
+	querier, err := db.Querier(context.TODO(), 0, 1)
 	testutil.Ok(t, err)
 	defer querier.Close()
 
@@ -170,10 +177,10 @@ func TestDataNotAvailableAfterRollback(t *testing.T) {
 }
 
 func TestDBAppenderAddRef(t *testing.T) {
-	db, delete := openTestDB(t, nil)
+	db, closeFn := openTestDB(t, nil, nil)
 	defer func() {
 		testutil.Ok(t, db.Close())
-		delete()
+		closeFn()
 	}()
 
 	app1 := db.Appender()
@@ -208,7 +215,7 @@ func TestDBAppenderAddRef(t *testing.T) {
 
 	testutil.Ok(t, app2.Commit())
 
-	q, err := db.Querier(0, 200)
+	q, err := db.Querier(context.TODO(), 0, 200)
 	testutil.Ok(t, err)
 
 	res := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
@@ -225,10 +232,10 @@ func TestDBAppenderAddRef(t *testing.T) {
 }
 
 func TestAppendEmptyLabelsIgnored(t *testing.T) {
-	db, delete := openTestDB(t, nil)
+	db, closeFn := openTestDB(t, nil, nil)
 	defer func() {
 		testutil.Ok(t, db.Close())
-		delete()
+		closeFn()
 	}()
 
 	app1 := db.Appender()
@@ -278,10 +285,10 @@ func TestDeleteSimple(t *testing.T) {
 
 Outer:
 	for _, c := range cases {
-		db, delete := openTestDB(t, nil)
+		db, closeFn := openTestDB(t, nil, nil)
 		defer func() {
 			testutil.Ok(t, db.Close())
-			delete()
+			closeFn()
 		}()
 
 		app := db.Appender()
@@ -301,18 +308,19 @@ Outer:
 		}
 
 		// Compare the result.
-		q, err := db.Querier(0, numSamples)
+		q, err := db.Querier(context.TODO(), 0, numSamples)
 		testutil.Ok(t, err)
 
-		res, err := q.Select(labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+		res, ws, err := q.Select(nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
 		testutil.Ok(t, err)
+		testutil.Equals(t, 0, len(ws))
 
 		expSamples := make([]tsdbutil.Sample, 0, len(c.remaint))
 		for _, ts := range c.remaint {
 			expSamples = append(expSamples, sample{ts, smpls[ts]})
 		}
 
-		expss := newMockSeriesSet([]Series{
+		expss := newMockSeriesSet([]storage.Series{
 			newSeries(map[string]string{"a": "b"}, expSamples),
 		})
 
@@ -338,10 +346,10 @@ Outer:
 }
 
 func TestAmendDatapointCausesError(t *testing.T) {
-	db, delete := openTestDB(t, nil)
+	db, closeFn := openTestDB(t, nil, nil)
 	defer func() {
 		testutil.Ok(t, db.Close())
-		delete()
+		closeFn()
 	}()
 
 	app := db.Appender()
@@ -356,10 +364,10 @@ func TestAmendDatapointCausesError(t *testing.T) {
 }
 
 func TestDuplicateNaNDatapointNoAmendError(t *testing.T) {
-	db, delete := openTestDB(t, nil)
+	db, closeFn := openTestDB(t, nil, nil)
 	defer func() {
 		testutil.Ok(t, db.Close())
-		delete()
+		closeFn()
 	}()
 
 	app := db.Appender()
@@ -373,11 +381,12 @@ func TestDuplicateNaNDatapointNoAmendError(t *testing.T) {
 }
 
 func TestNonDuplicateNaNDatapointsCausesAmendError(t *testing.T) {
-	db, delete := openTestDB(t, nil)
+	db, closeFn := openTestDB(t, nil, nil)
 	defer func() {
 		testutil.Ok(t, db.Close())
-		delete()
+		closeFn()
 	}()
 
 	app := db.Appender()
 	_, err := app.Add(labels.Labels{}, 0, math.Float64frombits(0x7ff0000000000001))
 	testutil.Ok(t, err)
@@ -389,10 +398,10 @@ func TestNonDuplicateNaNDatapointsCausesAmendError(t *testing.T) {
 }
 
 func TestSkippingInvalidValuesInSameTxn(t *testing.T) {
-	db, delete := openTestDB(t, nil)
+	db, closeFn := openTestDB(t, nil, nil)
 	defer func() {
 		testutil.Ok(t, db.Close())
-		delete()
+		closeFn()
 	}()
 
 	// Append AmendedValue.
@@ -404,7 +413,7 @@ func TestSkippingInvalidValuesInSameTxn(t *testing.T) {
 	testutil.Ok(t, app.Commit())
 
 	// Make sure the right value is stored.
-	q, err := db.Querier(0, 10)
+	q, err := db.Querier(context.TODO(), 0, 10)
 	testutil.Ok(t, err)
 
 	ssMap := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
@@ -421,7 +430,7 @@ func TestSkippingInvalidValuesInSameTxn(t *testing.T) {
 	testutil.Ok(t, err)
 	testutil.Ok(t, app.Commit())
 
-	q, err = db.Querier(0, 10)
+	q, err = db.Querier(context.TODO(), 0, 10)
 	testutil.Ok(t, err)
 
 	ssMap = query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
@@ -432,8 +441,8 @@ func TestSkippingInvalidValuesInSameTxn(t *testing.T) {
 }
 
 func TestDB_Snapshot(t *testing.T) {
-	db, delete := openTestDB(t, nil)
-	defer delete()
+	db, closeFn := openTestDB(t, nil, nil)
+	defer closeFn()
 
 	// append data
 	app := db.Appender()
@@ -460,13 +469,14 @@ func TestDB_Snapshot(t *testing.T) {
 	testutil.Ok(t, err)
 	defer func() { testutil.Ok(t, db.Close()) }()
 
-	querier, err := db.Querier(mint, mint+1000)
+	querier, err := db.Querier(context.TODO(), mint, mint+1000)
 	testutil.Ok(t, err)
 	defer func() { testutil.Ok(t, querier.Close()) }()
 
 	// sum values
-	seriesSet, err := querier.Select(labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+	seriesSet, ws, err := querier.Select(nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
 	testutil.Ok(t, err)
+	testutil.Equals(t, 0, len(ws))
 
 	sum := 0.0
 	for seriesSet.Next() {
@@ -485,8 +495,8 @@ func TestDB_Snapshot(t *testing.T) {
 // that are outside the set block time range.
 // See https://github.com/prometheus/prometheus/issues/5105
 func TestDB_Snapshot_ChunksOutsideOfCompactedRange(t *testing.T) {
-	db, delete := openTestDB(t, nil)
-	defer delete()
+	db, closeFn := openTestDB(t, nil, nil)
+	defer closeFn()
 
 	app := db.Appender()
 	mint := int64(1414141414000)
@@ -514,13 +524,14 @@ func TestDB_Snapshot_ChunksOutsideOfCompactedRange(t *testing.T) {
 	testutil.Ok(t, err)
 	defer func() { testutil.Ok(t, db.Close()) }()
 
-	querier, err := db.Querier(mint, mint+1000)
+	querier, err := db.Querier(context.TODO(), mint, mint+1000)
 	testutil.Ok(t, err)
 	defer func() { testutil.Ok(t, querier.Close()) }()
 
 	// Sum values.
-	seriesSet, err := querier.Select(labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
+	seriesSet, ws, err := querier.Select(nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
 	testutil.Ok(t, err)
+	testutil.Equals(t, 0, len(ws))
 
 	sum := 0.0
 	for seriesSet.Next() {
@@ -540,8 +551,8 @@ func TestDB_Snapshot_ChunksOutsideOfCompactedRange(t *testing.T) {
 func TestDB_SnapshotWithDelete(t *testing.T) {
 	numSamples := int64(10)
 
-	db, delete := openTestDB(t, nil)
-	defer delete()
+	db, closeFn := openTestDB(t, nil, nil)
+	defer closeFn()
 
 	app := db.Appender()
 
@@ -586,19 +597,20 @@ Outer:
 		defer func() { testutil.Ok(t, db.Close()) }()
 
 		// Compare the result.
-		q, err := db.Querier(0, numSamples)
+		q, err := db.Querier(context.TODO(), 0, numSamples)
 		testutil.Ok(t, err)
 		defer func() { testutil.Ok(t, q.Close()) }()
 
-		res, err := q.Select(labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+		res, ws, err := q.Select(nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
 		testutil.Ok(t, err)
+		testutil.Equals(t, 0, len(ws))
 
 		expSamples := make([]tsdbutil.Sample, 0, len(c.remaint))
 		for _, ts := range c.remaint {
 			expSamples = append(expSamples, sample{ts, smpls[ts]})
 		}
 
-		expss := newMockSeriesSet([]Series{
+		expss := newMockSeriesSet([]storage.Series{
 			newSeries(map[string]string{"a": "b"}, expSamples),
 		})
 
@@ -683,10 +695,10 @@ func TestDB_e2e(t *testing.T) {
 		seriesMap[labels.New(l...).String()] = []tsdbutil.Sample{}
 	}
 
-	db, delete := openTestDB(t, nil)
+	db, closeFn := openTestDB(t, nil, nil)
 	defer func() {
 		testutil.Ok(t, db.Close())
-		delete()
+		closeFn()
 	}()
 
 	app := db.Appender()
@@ -760,11 +772,12 @@ func TestDB_e2e(t *testing.T) {
 			}
 		}
 
-		q, err := db.Querier(mint, maxt)
+		q, err := db.Querier(context.TODO(), mint, maxt)
 		testutil.Ok(t, err)
 
-		ss, err := q.Select(qry.ms...)
+		ss, ws, err := q.Select(nil, qry.ms...)
 		testutil.Ok(t, err)
+		testutil.Equals(t, 0, len(ws))
 
 		result := map[string][]tsdbutil.Sample{}
 
@@ -788,8 +801,8 @@ func TestDB_e2e(t *testing.T) {
 }
 
 func TestWALFlushedOnDBClose(t *testing.T) {
-	db, delete := openTestDB(t, nil)
-	defer delete()
+	db, closeFn := openTestDB(t, nil, nil)
+	defer closeFn()
 
 	dirDb := db.Dir()
 
@@ -806,11 +819,12 @@ func TestWALFlushedOnDBClose(t *testing.T) {
 	testutil.Ok(t, err)
 	defer func() { testutil.Ok(t, db.Close()) }()
 
-	q, err := db.Querier(0, 1)
+	q, err := db.Querier(context.TODO(), 0, 1)
 	testutil.Ok(t, err)
 
-	values, err := q.LabelValues("labelname")
+	values, ws, err := q.LabelValues("labelname")
 	testutil.Ok(t, err)
+	testutil.Equals(t, 0, len(ws))
 	testutil.Equals(t, []string{"labelvalue"}, values)
 }
 
@@ -821,10 +835,10 @@ func TestWALSegmentSizeOptions(t *testing.T) {
 			files, err := ioutil.ReadDir(filepath.Join(dbDir, "wal"))
 			testutil.Ok(t, err)
 			for _, f := range files[:len(files)-1] {
-				testutil.Equals(t, int64(DefaultOptions.WALSegmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name())
+				testutil.Equals(t, int64(DefaultOptions().WALSegmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name())
 			}
 			lastFile := files[len(files)-1]
-			testutil.Assert(t, int64(DefaultOptions.WALSegmentSize) > lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name())
+			testutil.Assert(t, int64(DefaultOptions().WALSegmentSize) > lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name())
 		},
 		// Custom Wal Size.
 		2 * 32 * 1024: func(dbDir string, segmentSize int) {
@@ -846,10 +860,11 @@ func TestWALSegmentSizeOptions(t *testing.T) {
 	}
 	for segmentSize, testFunc := range tests {
 		t.Run(fmt.Sprintf("WALSegmentSize %d test", segmentSize), func(t *testing.T) {
-			options := *DefaultOptions
-			options.WALSegmentSize = segmentSize
-			db, delete := openTestDB(t, &options)
-			defer delete()
+			opts := DefaultOptions()
+			opts.WALSegmentSize = segmentSize
+			db, closeFn := openTestDB(t, opts, nil)
+			defer closeFn()
 
 			app := db.Appender()
 			for i := int64(0); i < 155; i++ {
 				_, err := app.Add(labels.Labels{labels.Label{Name: "wal", Value: "size"}}, i, rand.Float64())
@@ -858,8 +873,8 @@ func TestWALSegmentSizeOptions(t *testing.T) {
 			}
 
 			dbDir := db.Dir()
-			db.Close()
-			testFunc(dbDir, options.WALSegmentSize)
+			testutil.Ok(t, db.Close())
+			testFunc(dbDir, int(opts.WALSegmentSize))
 		})
 	}
 }
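The size assertions in TestWALSegmentSizeOptions follow from the WAL's segmentation contract: every completed segment file is exactly WALSegmentSize bytes, and only the final, still-open segment may be smaller. The same check in isolation, as a sketch over an assumed data directory:

	package main

	import (
		"fmt"
		"io/ioutil"
		"path/filepath"
	)

	func main() {
		const segmentSize = 2 * 32 * 1024 // the custom size the test exercises
		files, err := ioutil.ReadDir(filepath.Join("data", "wal"))
		if err != nil {
			panic(err)
		}
		if len(files) == 0 {
			return
		}
		for _, f := range files[:len(files)-1] {
			if f.Size() != segmentSize { // completed segments are exactly segmentSize
				fmt.Println("unexpected segment size:", f.Name())
			}
		}
		// The last segment is still being written and may be smaller.
	}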
@@ -867,8 +882,8 @@ func TestWALSegmentSizeOptions(t *testing.T) {
 func TestTombstoneClean(t *testing.T) {
 	numSamples := int64(10)
 
-	db, delete := openTestDB(t, nil)
-	defer delete()
+	db, closeFn := openTestDB(t, nil, nil)
+	defer closeFn()
 
 	app := db.Appender()
 
@@ -915,19 +930,20 @@ func TestTombstoneClean(t *testing.T) {
 	testutil.Ok(t, db.CleanTombstones())
 
 	// Compare the result.
-	q, err := db.Querier(0, numSamples)
+	q, err := db.Querier(context.TODO(), 0, numSamples)
 	testutil.Ok(t, err)
 	defer q.Close()
 
-	res, err := q.Select(labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+	res, ws, err := q.Select(nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
 	testutil.Ok(t, err)
+	testutil.Equals(t, 0, len(ws))
 
 	expSamples := make([]tsdbutil.Sample, 0, len(c.remaint))
 	for _, ts := range c.remaint {
 		expSamples = append(expSamples, sample{ts, smpls[ts]})
 	}
 
-	expss := newMockSeriesSet([]Series{
+	expss := newMockSeriesSet([]storage.Series{
 		newSeries(map[string]string{"a": "b"}, expSamples),
 	})
 
@@ -965,11 +981,10 @@ func TestTombstoneClean(t *testing.T) {
 // When TombstoneClean errors the original block that should be rebuilt doesn't get deleted so
 // if TombstoneClean leaves any blocks behind these will overlap.
 func TestTombstoneCleanFail(t *testing.T) {
-
-	db, delete := openTestDB(t, nil)
+	db, closeFn := openTestDB(t, nil, nil)
 	defer func() {
 		testutil.Ok(t, db.Close())
-		delete()
+		closeFn()
 	}()
 
 	var expectedBlockDirs []string
 
@@ -1041,18 +1056,15 @@ func (c *mockCompactorFailing) Write(dest string, b BlockReader, mint, maxt int6
 	return block.Meta().ULID, nil
 }
 
-func (*mockCompactorFailing) Compact(dest string, dirs []string, open []*Block) (ulid.ULID, error) {
+func (*mockCompactorFailing) Compact(string, []string, []*Block) (ulid.ULID, error) {
 	return ulid.ULID{}, nil
-
 }
 
 func TestTimeRetention(t *testing.T) {
-	db, delete := openTestDB(t, &Options{
-		BlockRanges: []int64{1000},
-	})
+	db, closeFn := openTestDB(t, nil, []int64{1000})
 	defer func() {
 		testutil.Ok(t, db.Close())
-		delete()
+		closeFn()
 	}()
 
 	blocks := []*BlockMeta{
@@ -1068,7 +1080,7 @@ func TestTimeRetention(t *testing.T) {
 	testutil.Ok(t, db.reload())                       // Reload the db to register the new blocks.
 	testutil.Equals(t, len(blocks), len(db.Blocks())) // Ensure all blocks are registered.
 
-	db.opts.RetentionDuration = uint64(blocks[2].MaxTime - blocks[1].MinTime)
+	db.opts.RetentionDuration = blocks[2].MaxTime - blocks[1].MinTime
 	testutil.Ok(t, db.reload())
 
 	expBlocks := blocks[1:]
@@ -1081,12 +1093,10 @@ func TestTimeRetention(t *testing.T) {
 }
 
 func TestSizeRetention(t *testing.T) {
-	db, delete := openTestDB(t, &Options{
-		BlockRanges: []int64{100},
-	})
+	db, closeFn := openTestDB(t, nil, []int64{100})
 	defer func() {
 		testutil.Ok(t, db.Close())
-		delete()
+		closeFn()
 	}()
 
 	blocks := []*BlockMeta{
@@ -1183,24 +1193,24 @@ func TestSizeRetentionMetric(t *testing.T) {
 	}
 
 	for _, c := range cases {
-		db, delete := openTestDB(t, &Options{
-			BlockRanges: []int64{100},
-			MaxBytes:    c.maxBytes,
-		})
+		db, closeFn := openTestDB(t, &Options{
+			MaxBytes: c.maxBytes,
+		}, []int64{100})
+		defer func() {
+			testutil.Ok(t, db.Close())
+			closeFn()
+		}()
 
 		actMaxBytes := int64(prom_testutil.ToFloat64(db.metrics.maxBytes))
 		testutil.Equals(t, actMaxBytes, c.expMaxBytes, "metric retention limit bytes mismatch")
-
-		testutil.Ok(t, db.Close())
-		delete()
 	}
 }
 
 func TestNotMatcherSelectsLabelsUnsetSeries(t *testing.T) {
-	db, delete := openTestDB(t, nil)
+	db, closeFn := openTestDB(t, nil, nil)
 	defer func() {
 		testutil.Ok(t, db.Close())
-		delete()
+		closeFn()
 	}()
 
 	labelpairs := []labels.Labels{
@@ -1257,13 +1267,14 @@ func TestNotMatcherSelectsLabelsUnsetSeries(t *testing.T) {
 		series: labelpairs[:1],
 	}}
 
-	q, err := db.Querier(0, 10)
+	q, err := db.Querier(context.TODO(), 0, 10)
 	testutil.Ok(t, err)
 	defer func() { testutil.Ok(t, q.Close()) }()
 
 	for _, c := range cases {
-		ss, err := q.Select(c.selector...)
+		ss, ws, err := q.Select(nil, c.selector...)
 		testutil.Ok(t, err)
+		testutil.Equals(t, 0, len(ws))
 
 		lres, err := expandSeriesSet(ss)
 		testutil.Ok(t, err)
@@ -1272,7 +1283,7 @@ func TestNotMatcherSelectsLabelsUnsetSeries(t *testing.T) {
 	}
 }
 
-func expandSeriesSet(ss SeriesSet) ([]labels.Labels, error) {
+func expandSeriesSet(ss storage.SeriesSet) ([]labels.Labels, error) {
 	result := []labels.Labels{}
 	for ss.Next() {
 		result = append(result, ss.At().Labels())
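expandSeriesSet simply drains a storage.SeriesSet into its label sets; the version below mirrors it and makes the trailing Err() check explicit, which is the idiomatic way to finish iterating a series set:

	package main

	import (
		"fmt"

		"github.com/prometheus/prometheus/pkg/labels"
		"github.com/prometheus/prometheus/storage"
	)

	// expand mirrors expandSeriesSet from the test file above.
	func expand(ss storage.SeriesSet) ([]labels.Labels, error) {
		result := []labels.Labels{}
		for ss.Next() {
			result = append(result, ss.At().Labels())
		}
		return result, ss.Err() // iteration errors surface only at the end
	}

	func main() { fmt.Println("wire expand() to a Querier.Select result") }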
@@ -1375,15 +1386,15 @@ func TestOverlappingBlocksDetectsAllOverlaps(t *testing.T) {
 
 // Regression test for https://github.com/prometheus/prometheus/tsdb/issues/347
 func TestChunkAtBlockBoundary(t *testing.T) {
-	db, delete := openTestDB(t, nil)
+	db, closeFn := openTestDB(t, nil, nil)
 	defer func() {
 		testutil.Ok(t, db.Close())
-		delete()
+		closeFn()
 	}()
 
 	app := db.Appender()
 
-	blockRange := DefaultOptions.BlockRanges[0]
+	blockRange := db.compactor.(*LeveledCompactor).ranges[0]
 	label := labels.FromStrings("foo", "bar")
 
 	for i := int64(0); i < 3; i++ {
@@ -1432,15 +1443,15 @@ func TestChunkAtBlockBoundary(t *testing.T) {
 }
 
 func TestQuerierWithBoundaryChunks(t *testing.T) {
-	db, delete := openTestDB(t, nil)
+	db, closeFn := openTestDB(t, nil, nil)
 	defer func() {
 		testutil.Ok(t, db.Close())
-		delete()
+		closeFn()
 	}()
 
 	app := db.Appender()
 
-	blockRange := DefaultOptions.BlockRanges[0]
+	blockRange := db.compactor.(*LeveledCompactor).ranges[0]
 	label := labels.FromStrings("foo", "bar")
 
 	for i := int64(0); i < 5; i++ {
@@ -1456,7 +1467,7 @@ func TestQuerierWithBoundaryChunks(t *testing.T) {
 
 	testutil.Assert(t, len(db.blocks) >= 3, "invalid test, less than three blocks in DB")
 
-	q, err := db.Querier(blockRange, 2*blockRange)
+	q, err := db.Querier(context.TODO(), blockRange, 2*blockRange)
 	testutil.Ok(t, err)
 	defer q.Close()
 
@@ -1583,16 +1594,14 @@ func TestInitializeHeadTimestamp(t *testing.T) {
 }
 
 func TestNoEmptyBlocks(t *testing.T) {
-	db, delete := openTestDB(t, &Options{
-		BlockRanges: []int64{100},
-	})
+	db, closeFn := openTestDB(t, nil, []int64{100})
 	defer func() {
 		testutil.Ok(t, db.Close())
-		delete()
+		closeFn()
 	}()
 	db.DisableCompactions()
 
-	rangeToTriggerCompaction := db.opts.BlockRanges[0]/2*3 - 1
+	rangeToTriggerCompaction := db.compactor.(*LeveledCompactor).ranges[0]/2*3 - 1
 	defaultLabel := labels.FromStrings("foo", "bar")
 	defaultMatcher := labels.MustNewMatcher(labels.MatchRegexp, "", ".*")
 
@@ -1666,8 +1675,8 @@ func TestNoEmptyBlocks(t *testing.T) {
 	t.Run("Test no blocks remaining after deleting all samples from disk.", func(t *testing.T) {
 		currentTime := db.Head().MaxTime()
 		blocks := []*BlockMeta{
-			{MinTime: currentTime, MaxTime: currentTime + db.opts.BlockRanges[0]},
-			{MinTime: currentTime + 100, MaxTime: currentTime + 100 + db.opts.BlockRanges[0]},
+			{MinTime: currentTime, MaxTime: currentTime + db.compactor.(*LeveledCompactor).ranges[0]},
+			{MinTime: currentTime + 100, MaxTime: currentTime + 100 + db.compactor.(*LeveledCompactor).ranges[0]},
 		}
 		for _, m := range blocks {
 			createBlock(t, db.Dir(), genSeries(2, 2, m.MinTime, m.MaxTime))
@@ -1727,7 +1736,7 @@ func TestDB_LabelNames(t *testing.T) {
 		},
 	}
 
-	blockRange := DefaultOptions.BlockRanges[0]
+	blockRange := int64(1000)
 	// Appends samples into the database.
 	appendSamples := func(db *DB, mint, maxt int64, sampleLabels [][2]string) {
 		t.Helper()
@@ -1743,10 +1752,10 @@ func TestDB_LabelNames(t *testing.T) {
 		testutil.Ok(t, err)
 	}
 	for _, tst := range tests {
-		db, delete := openTestDB(t, nil)
+		db, closeFn := openTestDB(t, nil, nil)
 		defer func() {
 			testutil.Ok(t, db.Close())
-			delete()
+			closeFn()
 		}()
 
 		appendSamples(db, 0, 4, tst.sampleLabels1)
@@ -1778,23 +1787,25 @@ func TestDB_LabelNames(t *testing.T) {
 		appendSamples(db, 5, 9, tst.sampleLabels2)
 
 		// Testing DB (union).
-		q, err := db.Querier(math.MinInt64, math.MaxInt64)
+		q, err := db.Querier(context.TODO(), math.MinInt64, math.MaxInt64)
 		testutil.Ok(t, err)
-		labelNames, err = q.LabelNames()
+		var ws storage.Warnings
+		labelNames, ws, err = q.LabelNames()
 		testutil.Ok(t, err)
+		testutil.Equals(t, 0, len(ws))
 		testutil.Ok(t, q.Close())
 		testutil.Equals(t, tst.exp2, labelNames)
 	}
 }
 
 func TestCorrectNumTombstones(t *testing.T) {
-	db, delete := openTestDB(t, nil)
+	db, closeFn := openTestDB(t, nil, nil)
 	defer func() {
 		testutil.Ok(t, db.Close())
-		delete()
+		closeFn()
 	}()
 
-	blockRange := DefaultOptions.BlockRanges[0]
+	blockRange := db.compactor.(*LeveledCompactor).ranges[0]
 	defaultLabel := labels.FromStrings("foo", "bar")
 	defaultMatcher := labels.MustNewMatcher(labels.MatchEqual, defaultLabel[0].Name, defaultLabel[0].Value)
 
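LabelNames (and LabelValues, earlier) adopt the same values-plus-warnings return shape as Select. A sketch of reading label metadata under the new signatures:

	package main

	import (
		"context"
		"fmt"

		"github.com/prometheus/prometheus/tsdb"
	)

	func main() {
		db, err := tsdb.Open("data", nil, nil, nil)
		if err != nil {
			panic(err)
		}
		defer db.Close()

		q, err := db.Querier(context.TODO(), 0, 1000)
		if err != nil {
			panic(err)
		}
		defer q.Close()

		names, ws, err := q.LabelNames() // now also returns warnings
		if err != nil {
			panic(err)
		}
		fmt.Println(names, "warnings:", len(ws))
	}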
@@ -1827,7 +1838,7 @@ func TestCorrectNumTombstones(t *testing.T) {
 
 func TestVerticalCompaction(t *testing.T) {
 	cases := []struct {
-		blockSeries          [][]Series
+		blockSeries          [][]storage.Series
 		expSeries            map[string][]tsdbutil.Sample
 		expBlockNum          int
 		expOverlappingBlocks int
@@ -1836,7 +1847,7 @@ func TestVerticalCompaction(t *testing.T) {
 		// |--------------|
 		// |----------------|
 		{
-			blockSeries: [][]Series{
+			blockSeries: [][]storage.Series{
 				{
 					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
 						sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
@@ -1864,7 +1875,7 @@ func TestVerticalCompaction(t *testing.T) {
 		// |-------------------------------|
 		// |----------------|
 		{
-			blockSeries: [][]Series{
+			blockSeries: [][]storage.Series{
 				{
 					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
 						sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
@@ -1893,7 +1904,7 @@ func TestVerticalCompaction(t *testing.T) {
 		// |------------|
 		// |--------------------|
 		{
-			blockSeries: [][]Series{
+			blockSeries: [][]storage.Series{
 				{
 					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
 						sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
@@ -1929,7 +1940,7 @@ func TestVerticalCompaction(t *testing.T) {
 		// |--------------------|
 		// |----------------|
 		{
-			blockSeries: [][]Series{
+			blockSeries: [][]storage.Series{
 				{
 					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
 						sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
@@ -1965,7 +1976,7 @@ func TestVerticalCompaction(t *testing.T) {
 		// |------------|
 		// |-------------------------|
 		{
-			blockSeries: [][]Series{
+			blockSeries: [][]storage.Series{
 				{
 					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
 						sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
@@ -2003,7 +2014,7 @@ func TestVerticalCompaction(t *testing.T) {
 		// |------------|
 		// |-------------------------|
 		{
-			blockSeries: [][]Series{
+			blockSeries: [][]storage.Series{
 				{
 					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
 						sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
@@ -2100,7 +2111,7 @@ func TestVerticalCompaction(t *testing.T) {
 		// |--------------|
 		// |----------------|
 		{
-			blockSeries: [][]Series{
+			blockSeries: [][]storage.Series{
 				{
 					newSeries(map[string]string{"a": "b"}, []tsdbutil.Sample{
 						sample{0, 0}, sample{1, 0}, sample{2, 0}, sample{4, 0},
@@ -2154,9 +2165,9 @@ func TestVerticalCompaction(t *testing.T) {
 			for _, series := range c.blockSeries {
 				createBlock(t, tmpdir, series)
 			}
-			opts := *DefaultOptions
+			opts := DefaultOptions()
 			opts.AllowOverlappingBlocks = true
-			db, err := Open(tmpdir, nil, nil, &opts)
+			db, err := Open(tmpdir, nil, nil, opts)
 			testutil.Ok(t, err)
 			defer func() {
 				testutil.Ok(t, db.Close())
@@ -2165,7 +2176,7 @@ func TestVerticalCompaction(t *testing.T) {
 			testutil.Assert(t, len(db.blocks) == len(c.blockSeries), "Wrong number of blocks [before compact].")
 
 			// Vertical Query Merging test.
-			querier, err := db.Querier(0, 100)
+			querier, err := db.Querier(context.TODO(), 0, 100)
 			testutil.Ok(t, err)
 			actSeries := query(t, querier, defaultMatcher)
 			testutil.Equals(t, c.expSeries, actSeries)
@@ -2180,7 +2191,7 @@ func TestVerticalCompaction(t *testing.T) {
 			testutil.Equals(t, c.expOverlappingBlocks, int(prom_testutil.ToFloat64(lc.metrics.overlappingBlocks)), "overlapping blocks count mismatch")
 
 			// Query test after merging the overlapping blocks.
-			querier, err = db.Querier(0, 100)
+			querier, err = db.Querier(context.TODO(), 0, 100)
 			testutil.Ok(t, err)
actSeries = query(t, querier, defaultMatcher)
|
actSeries = query(t, querier, defaultMatcher)
|
||||||
testutil.Equals(t, c.expSeries, actSeries)
|
testutil.Equals(t, c.expSeries, actSeries)
|
||||||
|
@ -2203,20 +2214,16 @@ func TestBlockRanges(t *testing.T) {
|
||||||
logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
|
logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
|
||||||
|
|
||||||
dir, err := ioutil.TempDir("", "test_storage")
|
dir, err := ioutil.TempDir("", "test_storage")
|
||||||
if err != nil {
|
testutil.Ok(t, err)
|
||||||
t.Fatalf("Opening test dir failed: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
rangeToTriggercompaction := DefaultOptions.BlockRanges[0]/2*3 + 1
|
|
||||||
|
|
||||||
// Test that the compactor doesn't create overlapping blocks
|
// Test that the compactor doesn't create overlapping blocks
|
||||||
// when a non standard block already exists.
|
// when a non standard block already exists.
|
||||||
firstBlockMaxT := int64(3)
|
firstBlockMaxT := int64(3)
|
||||||
createBlock(t, dir, genSeries(1, 1, 0, firstBlockMaxT))
|
createBlock(t, dir, genSeries(1, 1, 0, firstBlockMaxT))
|
||||||
db, err := Open(dir, logger, nil, DefaultOptions)
|
db, err := open(dir, logger, nil, DefaultOptions(), []int64{10000})
|
||||||
if err != nil {
|
testutil.Ok(t, err)
|
||||||
t.Fatalf("Opening test storage failed: %s", err)
|
|
||||||
}
|
rangeToTriggerCompaction := db.compactor.(*LeveledCompactor).ranges[0]/2*3 + 1
|
||||||
defer func() {
|
defer func() {
|
||||||
os.RemoveAll(dir)
|
os.RemoveAll(dir)
|
||||||
}()
|
}()
|
||||||
|
@ -2230,7 +2237,7 @@ func TestBlockRanges(t *testing.T) {
|
||||||
testutil.Ok(t, err)
|
testutil.Ok(t, err)
|
||||||
_, err = app.Add(lbl, firstBlockMaxT+2, rand.Float64())
|
_, err = app.Add(lbl, firstBlockMaxT+2, rand.Float64())
|
||||||
testutil.Ok(t, err)
|
testutil.Ok(t, err)
|
||||||
secondBlockMaxt := firstBlockMaxT + rangeToTriggercompaction
|
secondBlockMaxt := firstBlockMaxT + rangeToTriggerCompaction
|
||||||
_, err = app.Add(lbl, secondBlockMaxt, rand.Float64()) // Add samples to trigger a new compaction
|
_, err = app.Add(lbl, secondBlockMaxt, rand.Float64()) // Add samples to trigger a new compaction
|
||||||
|
|
||||||
testutil.Ok(t, err)
|
testutil.Ok(t, err)
|
||||||
|
@ -2264,16 +2271,15 @@ func TestBlockRanges(t *testing.T) {
|
||||||
thirdBlockMaxt := secondBlockMaxt + 2
|
thirdBlockMaxt := secondBlockMaxt + 2
|
||||||
createBlock(t, dir, genSeries(1, 1, secondBlockMaxt+1, thirdBlockMaxt))
|
createBlock(t, dir, genSeries(1, 1, secondBlockMaxt+1, thirdBlockMaxt))
|
||||||
|
|
||||||
db, err = Open(dir, logger, nil, DefaultOptions)
|
db, err = open(dir, logger, nil, DefaultOptions(), []int64{10000})
|
||||||
if err != nil {
|
testutil.Ok(t, err)
|
||||||
t.Fatalf("Opening test storage failed: %s", err)
|
|
||||||
}
|
|
||||||
defer db.Close()
|
defer db.Close()
|
||||||
testutil.Equals(t, 3, len(db.Blocks()), "db doesn't include expected number of blocks")
|
testutil.Equals(t, 3, len(db.Blocks()), "db doesn't include expected number of blocks")
|
||||||
testutil.Equals(t, db.Blocks()[2].Meta().MaxTime, thirdBlockMaxt, "unexpected maxt of the last block")
|
testutil.Equals(t, db.Blocks()[2].Meta().MaxTime, thirdBlockMaxt, "unexpected maxt of the last block")
|
||||||
|
|
||||||
app = db.Appender()
|
app = db.Appender()
|
||||||
_, err = app.Add(lbl, thirdBlockMaxt+rangeToTriggercompaction, rand.Float64()) // Trigger a compaction
|
_, err = app.Add(lbl, thirdBlockMaxt+rangeToTriggerCompaction, rand.Float64()) // Trigger a compaction
|
||||||
testutil.Ok(t, err)
|
testutil.Ok(t, err)
|
||||||
testutil.Ok(t, app.Commit())
|
testutil.Ok(t, app.Commit())
|
||||||
for x := 0; x < 100; x++ {
|
for x := 0; x < 100; x++ {
|
||||||
|
@ -2344,7 +2350,7 @@ func TestDBReadOnly(t *testing.T) {
|
||||||
testutil.Ok(t, err)
|
testutil.Ok(t, err)
|
||||||
testutil.Assert(t, expDbSize > dbSizeBeforeAppend, "db size didn't increase after an append")
|
testutil.Assert(t, expDbSize > dbSizeBeforeAppend, "db size didn't increase after an append")
|
||||||
|
|
||||||
q, err := dbWritable.Querier(math.MinInt64, math.MaxInt64)
|
q, err := dbWritable.Querier(context.TODO(), math.MinInt64, math.MaxInt64)
|
||||||
testutil.Ok(t, err)
|
testutil.Ok(t, err)
|
||||||
expSeries = query(t, q, matchAll)
|
expSeries = query(t, q, matchAll)
|
||||||
|
|
||||||
|
@ -2367,7 +2373,7 @@ func TestDBReadOnly(t *testing.T) {
|
||||||
testutil.Equals(t, expBlock.Meta(), blocks[i].Meta(), "block meta mismatch")
|
testutil.Equals(t, expBlock.Meta(), blocks[i].Meta(), "block meta mismatch")
|
||||||
}
|
}
|
||||||
|
|
||||||
q, err := dbReadOnly.Querier(math.MinInt64, math.MaxInt64)
|
q, err := dbReadOnly.Querier(context.TODO(), math.MinInt64, math.MaxInt64)
|
||||||
testutil.Ok(t, err)
|
testutil.Ok(t, err)
|
||||||
readOnlySeries := query(t, q, matchAll)
|
readOnlySeries := query(t, q, matchAll)
|
||||||
readOnlyDBHash := testutil.DirHash(t, dbDir)
|
readOnlyDBHash := testutil.DirHash(t, dbDir)
|
||||||
|
@ -2393,7 +2399,7 @@ func TestDBReadOnlyClosing(t *testing.T) {
|
||||||
testutil.Equals(t, db.Close(), ErrClosed)
|
testutil.Equals(t, db.Close(), ErrClosed)
|
||||||
_, err = db.Blocks()
|
_, err = db.Blocks()
|
||||||
testutil.Equals(t, err, ErrClosed)
|
testutil.Equals(t, err, ErrClosed)
|
||||||
_, err = db.Querier(0, 1)
|
_, err = db.Querier(context.TODO(), 0, 1)
|
||||||
testutil.Equals(t, err, ErrClosed)
|
testutil.Equals(t, err, ErrClosed)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2449,13 +2455,14 @@ func TestDBReadOnly_FlushWAL(t *testing.T) {
|
||||||
testutil.Ok(t, err)
|
testutil.Ok(t, err)
|
||||||
testutil.Equals(t, len(blocks), 1)
|
testutil.Equals(t, len(blocks), 1)
|
||||||
|
|
||||||
querier, err := db.Querier(0, int64(maxt)-1)
|
querier, err := db.Querier(context.TODO(), 0, int64(maxt)-1)
|
||||||
testutil.Ok(t, err)
|
testutil.Ok(t, err)
|
||||||
defer func() { testutil.Ok(t, querier.Close()) }()
|
defer func() { testutil.Ok(t, querier.Close()) }()
|
||||||
|
|
||||||
// Sum the values.
|
// Sum the values.
|
||||||
seriesSet, err := querier.Select(labels.MustNewMatcher(labels.MatchEqual, defaultLabelName, "flush"))
|
seriesSet, ws, err := querier.Select(nil, labels.MustNewMatcher(labels.MatchEqual, defaultLabelName, "flush"))
|
||||||
testutil.Ok(t, err)
|
testutil.Ok(t, err)
|
||||||
|
testutil.Equals(t, 0, len(ws))
|
||||||
|
|
||||||
sum := 0.0
|
sum := 0.0
|
||||||
for seriesSet.Next() {
|
for seriesSet.Next() {
|
||||||
|
|
|
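All of the test updates above follow the same two-part API change: `DB.Querier` now takes a `context.Context`, and `Select` takes an optional `*storage.SelectParams` and returns warnings next to the series set. A minimal sketch of a caller updated for the new signatures (the function name `readSamples` and the `foo="bar"` matcher are illustrative, not part of the change):

```go
package tsdb

import (
	"context"

	"github.com/prometheus/prometheus/pkg/labels"
)

// readSamples sketches the post-change read path against an open *DB.
func readSamples(db *DB) error {
	q, err := db.Querier(context.TODO(), 0, 100) // mint, maxt
	if err != nil {
		return err
	}
	defer q.Close()

	matcher := labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")
	ss, ws, err := q.Select(nil, matcher) // nil params: use the querier's full time range
	if err != nil {
		return err
	}
	for _, w := range ws {
		_ = w // warnings are non-fatal; a real caller would log or surface them
	}
	for ss.Next() {
		it := ss.At().Iterator()
		for it.Next() {
			t, v := it.At() // timestamp/value pair
			_, _ = t, v
		}
	}
	return ss.Err()
}
```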
tsdb/head.go

@@ -29,6 +29,7 @@ import (
     "github.com/pkg/errors"
     "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/prometheus/pkg/labels"
+    "github.com/prometheus/prometheus/storage"
     "github.com/prometheus/prometheus/tsdb/chunkenc"
     "github.com/prometheus/prometheus/tsdb/chunks"
     "github.com/prometheus/prometheus/tsdb/index"
@@ -800,7 +801,7 @@ func (h *RangeHead) Meta() BlockMeta {
 // initAppender is a helper to initialize the time bounds of the head
 // upon the first sample it receives.
 type initAppender struct {
-    app  Appender
+    app  storage.Appender
     head *Head
 }
 
@@ -836,7 +837,7 @@ func (a *initAppender) Rollback() error {
 }
 
 // Appender returns a new Appender on the database.
-func (h *Head) Appender() Appender {
+func (h *Head) Appender() storage.Appender {
     h.metrics.activeAppenders.Inc()
 
     // The head cache might not have a starting point yet. The init appender
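With this hunk, the head hands out the shared `storage.Appender` interface rather than a package-local one. A sketch of the write path as a caller would see it after the change (the helper name `appendSample` and its label set are assumptions for illustration):

```go
package tsdb

import "github.com/prometheus/prometheus/pkg/labels"

// appendSample sketches appending one sample through the storage.Appender
// now returned by Head.Appender; h is assumed to be an initialized *Head.
func appendSample(h *Head, ts int64, v float64) error {
	app := h.Appender()
	if _, err := app.Add(labels.FromStrings("foo", "bar"), ts, v); err != nil {
		_ = app.Rollback() // best effort; the sketch drops the rollback error
		return err
	}
	return app.Commit()
}
```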
tsdb/head_test.go

@@ -28,6 +28,7 @@ import (
     "github.com/pkg/errors"
     prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
     "github.com/prometheus/prometheus/pkg/labels"
+    "github.com/prometheus/prometheus/storage"
     "github.com/prometheus/prometheus/tsdb/chunkenc"
     "github.com/prometheus/prometheus/tsdb/chunks"
     "github.com/prometheus/prometheus/tsdb/index"
@@ -542,7 +543,7 @@ func TestHeadDeleteSimple(t *testing.T) {
             testutil.Ok(t, reloadedHead.Init(0))
 
             // Compare the query results for both heads - before and after the reload.
-            expSeriesSet := newMockSeriesSet([]Series{
+            expSeriesSet := newMockSeriesSet([]storage.Series{
                 newSeries(map[string]string{lblDefault.Name: lblDefault.Value}, func() []tsdbutil.Sample {
                     ss := make([]tsdbutil.Sample, 0, len(c.smplsExp))
                     for _, s := range c.smplsExp {
@@ -555,8 +556,9 @@ func TestHeadDeleteSimple(t *testing.T) {
             for _, h := range []*Head{head, reloadedHead} {
                 q, err := NewBlockQuerier(h, h.MinTime(), h.MaxTime())
                 testutil.Ok(t, err)
-                actSeriesSet, err := q.Select(labels.MustNewMatcher(labels.MatchEqual, lblDefault.Name, lblDefault.Value))
+                actSeriesSet, ws, err := q.Select(nil, labels.MustNewMatcher(labels.MatchEqual, lblDefault.Name, lblDefault.Value))
                 testutil.Ok(t, err)
+                testutil.Equals(t, 0, len(ws))
 
                 for {
                     eok, rok := expSeriesSet.Next(), actSeriesSet.Next()
@@ -601,8 +603,9 @@ func TestDeleteUntilCurMax(t *testing.T) {
     // Test the series returns no samples. The series is cleared only after compaction.
     q, err := NewBlockQuerier(hb, 0, 100000)
     testutil.Ok(t, err)
-    res, err := q.Select(labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+    res, ws, err := q.Select(nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
     testutil.Ok(t, err)
+    testutil.Equals(t, 0, len(ws))
     testutil.Assert(t, res.Next(), "series is not present")
     s := res.At()
     it := s.Iterator()
@@ -615,8 +618,9 @@ func TestDeleteUntilCurMax(t *testing.T) {
     testutil.Ok(t, app.Commit())
     q, err = NewBlockQuerier(hb, 0, 100000)
     testutil.Ok(t, err)
-    res, err = q.Select(labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
+    res, ws, err = q.Select(nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
     testutil.Ok(t, err)
+    testutil.Equals(t, 0, len(ws))
     testutil.Assert(t, res.Next(), "series don't exist")
     exps := res.At()
     it = exps.Iterator()
@@ -790,10 +794,11 @@ func TestDelete_e2e(t *testing.T) {
             q, err := NewBlockQuerier(hb, 0, 100000)
             testutil.Ok(t, err)
             defer q.Close()
-            ss, err := q.SelectSorted(del.ms...)
+            ss, ws, err := q.SelectSorted(nil, del.ms...)
             testutil.Ok(t, err)
+            testutil.Equals(t, 0, len(ws))
             // Build the mockSeriesSet.
-            matchedSeries := make([]Series, 0, len(matched))
+            matchedSeries := make([]storage.Series, 0, len(matched))
             for _, m := range matched {
                 smpls := seriesMap[m.String()]
                 smpls = deletedSamples(smpls, del.drange)
@@ -1077,8 +1082,9 @@ func TestUncommittedSamplesNotLostOnTruncate(t *testing.T) {
     testutil.Ok(t, err)
     defer q.Close()
 
-    ss, err := q.Select(labels.MustNewMatcher(labels.MatchEqual, "a", "1"))
+    ss, ws, err := q.Select(nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1"))
     testutil.Ok(t, err)
+    testutil.Equals(t, 0, len(ws))
 
     testutil.Equals(t, true, ss.Next())
 }
@@ -1104,8 +1110,9 @@ func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) {
     testutil.Ok(t, err)
     defer q.Close()
 
-    ss, err := q.Select(labels.MustNewMatcher(labels.MatchEqual, "a", "1"))
+    ss, ws, err := q.Select(nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1"))
     testutil.Ok(t, err)
+    testutil.Equals(t, 0, len(ws))
 
     testutil.Equals(t, false, ss.Next())
 
@@ -1227,7 +1234,7 @@ func TestWalRepair_DecodingError(t *testing.T) {
 
     // Open the db to trigger a repair.
     {
-        db, err := Open(dir, nil, nil, DefaultOptions)
+        db, err := Open(dir, nil, nil, DefaultOptions())
         testutil.Ok(t, err)
         defer func() {
             testutil.Ok(t, db.Close())
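Every updated test now repeats the same three lines: call `Select`, assert no error, assert zero warnings. A hypothetical helper (not in the commit) could fold that pattern into one call; this is only a sketch of how such a helper might look:

```go
package tsdb

import (
	"testing"

	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/util/testutil"
)

// selectNoWarnings is a hypothetical test helper: Select must succeed
// and must produce zero warnings, mirroring the assertions added above.
func selectNoWarnings(t *testing.T, q storage.Querier, ms ...*labels.Matcher) storage.SeriesSet {
	t.Helper()
	ss, ws, err := q.Select(nil, ms...)
	testutil.Ok(t, err)
	testutil.Equals(t, 0, len(ws))
	return ss
}
```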
298 tsdb/querier.go
@@ -14,13 +14,13 @@
 package tsdb
 
 import (
-    "fmt"
     "sort"
     "strings"
     "unicode/utf8"
 
     "github.com/pkg/errors"
     "github.com/prometheus/prometheus/pkg/labels"
+    "github.com/prometheus/prometheus/storage"
     "github.com/prometheus/prometheus/tsdb/chunkenc"
     "github.com/prometheus/prometheus/tsdb/chunks"
     tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
@@ -28,52 +28,25 @@ import (
     "github.com/prometheus/prometheus/tsdb/tombstones"
 )
 
-// Querier provides querying access over time series data of a fixed
-// time range.
-type Querier interface {
-    // Select returns a set of series that matches the given label matchers.
-    Select(...*labels.Matcher) (SeriesSet, error)
-
-    // SelectSorted returns a sorted set of series that matches the given label matcher.
-    SelectSorted(...*labels.Matcher) (SeriesSet, error)
-
-    // LabelValues returns all potential values for a label name.
-    // It is not safe to use the strings beyond the lifefime of the querier.
-    LabelValues(string) ([]string, error)
-
-    // LabelNames returns all the unique label names present in the block in sorted order.
-    LabelNames() ([]string, error)
-
-    // Close releases the resources of the Querier.
-    Close() error
-}
-
-// Series exposes a single time series.
-type Series interface {
-    // Labels returns the complete set of labels identifying the series.
-    Labels() labels.Labels
-
-    // Iterator returns a new iterator of the data of the series.
-    Iterator() SeriesIterator
-}
-
 // querier aggregates querying results from time blocks within
 // a single partition.
 type querier struct {
-    blocks []Querier
+    blocks []storage.Querier
 }
 
-func (q *querier) LabelValues(n string) ([]string, error) {
+func (q *querier) LabelValues(n string) ([]string, storage.Warnings, error) {
     return q.lvals(q.blocks, n)
 }
 
 // LabelNames returns all the unique label names present querier blocks.
-func (q *querier) LabelNames() ([]string, error) {
+func (q *querier) LabelNames() ([]string, storage.Warnings, error) {
     labelNamesMap := make(map[string]struct{})
+    var ws storage.Warnings
     for _, b := range q.blocks {
-        names, err := b.LabelNames()
+        names, w, err := b.LabelNames()
+        ws = append(ws, w...)
         if err != nil {
-            return nil, errors.Wrap(err, "LabelNames() from Querier")
+            return nil, ws, errors.Wrap(err, "LabelNames() from Querier")
         }
         for _, name := range names {
             labelNamesMap[name] = struct{}{}
@@ -86,51 +59,57 @@ func (q *querier) LabelNames() ([]string, error) {
     }
     sort.Strings(labelNames)
 
-    return labelNames, nil
+    return labelNames, ws, nil
 }
 
-func (q *querier) lvals(qs []Querier, n string) ([]string, error) {
+func (q *querier) lvals(qs []storage.Querier, n string) ([]string, storage.Warnings, error) {
     if len(qs) == 0 {
-        return nil, nil
+        return nil, nil, nil
     }
     if len(qs) == 1 {
         return qs[0].LabelValues(n)
     }
     l := len(qs) / 2
-    s1, err := q.lvals(qs[:l], n)
+
+    var ws storage.Warnings
+    s1, w, err := q.lvals(qs[:l], n)
+    ws = append(ws, w...)
     if err != nil {
-        return nil, err
+        return nil, ws, err
     }
-    s2, err := q.lvals(qs[l:], n)
+    s2, ws, err := q.lvals(qs[l:], n)
+    ws = append(ws, w...)
     if err != nil {
-        return nil, err
+        return nil, ws, err
     }
-    return mergeStrings(s1, s2), nil
+    return mergeStrings(s1, s2), ws, nil
 }
 
-func (q *querier) Select(ms ...*labels.Matcher) (SeriesSet, error) {
+func (q *querier) Select(p *storage.SelectParams, ms ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
     if len(q.blocks) != 1 {
-        return q.SelectSorted(ms...)
+        return q.SelectSorted(p, ms...)
     }
     // Sorting Head series is slow, and unneeded when only the
     // Head is being queried. Sorting blocks is a noop.
-    return q.blocks[0].Select(ms...)
+    return q.blocks[0].Select(p, ms...)
 }
 
-func (q *querier) SelectSorted(ms ...*labels.Matcher) (SeriesSet, error) {
+func (q *querier) SelectSorted(p *storage.SelectParams, ms ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
     if len(q.blocks) == 0 {
-        return EmptySeriesSet(), nil
+        return storage.EmptySeriesSet(), nil, nil
     }
-    ss := make([]SeriesSet, len(q.blocks))
+    ss := make([]storage.SeriesSet, len(q.blocks))
+    var ws storage.Warnings
     for i, b := range q.blocks {
-        s, err := b.SelectSorted(ms...)
+        s, w, err := b.SelectSorted(p, ms...)
+        ws = append(ws, w...)
         if err != nil {
-            return nil, err
+            return nil, ws, err
        }
         ss[i] = s
     }
 
-    return NewMergedSeriesSet(ss), nil
+    return NewMergedSeriesSet(ss), ws, nil
 }
 
 func (q *querier) Close() error {
|
||||||
querier
|
querier
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *verticalQuerier) Select(ms ...*labels.Matcher) (SeriesSet, error) {
|
func (q *verticalQuerier) Select(p *storage.SelectParams, ms ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
|
||||||
return q.sel(q.blocks, ms)
|
return q.sel(p, q.blocks, ms)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *verticalQuerier) SelectSorted(ms ...*labels.Matcher) (SeriesSet, error) {
|
func (q *verticalQuerier) SelectSorted(p *storage.SelectParams, ms ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
|
||||||
return q.sel(q.blocks, ms)
|
return q.sel(p, q.blocks, ms)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *verticalQuerier) sel(qs []Querier, ms []*labels.Matcher) (SeriesSet, error) {
|
func (q *verticalQuerier) sel(p *storage.SelectParams, qs []storage.Querier, ms []*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
|
||||||
if len(qs) == 0 {
|
if len(qs) == 0 {
|
||||||
return EmptySeriesSet(), nil
|
return storage.EmptySeriesSet(), nil, nil
|
||||||
}
|
}
|
||||||
if len(qs) == 1 {
|
if len(qs) == 1 {
|
||||||
return qs[0].SelectSorted(ms...)
|
return qs[0].SelectSorted(p, ms...)
|
||||||
}
|
}
|
||||||
l := len(qs) / 2
|
l := len(qs) / 2
|
||||||
|
|
||||||
a, err := q.sel(qs[:l], ms)
|
var ws storage.Warnings
|
||||||
|
a, w, err := q.sel(p, qs[:l], ms)
|
||||||
|
ws = append(ws, w...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, ws, err
|
||||||
}
|
}
|
||||||
b, err := q.sel(qs[l:], ms)
|
b, w, err := q.sel(p, qs[l:], ms)
|
||||||
|
ws = append(ws, w...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, ws, err
|
||||||
}
|
}
|
||||||
return newMergedVerticalSeriesSet(a, b), nil
|
return newMergedVerticalSeriesSet(a, b), ws, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewBlockQuerier returns a querier against the reader.
|
// NewBlockQuerier returns a querier against the reader.
|
||||||
func NewBlockQuerier(b BlockReader, mint, maxt int64) (Querier, error) {
|
func NewBlockQuerier(b BlockReader, mint, maxt int64) (storage.Querier, error) {
|
||||||
indexr, err := b.Index()
|
indexr, err := b.Index()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrapf(err, "open index reader")
|
return nil, errors.Wrapf(err, "open index reader")
|
||||||
|
@ -213,52 +195,64 @@ type blockQuerier struct {
|
||||||
mint, maxt int64
|
mint, maxt int64
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *blockQuerier) Select(ms ...*labels.Matcher) (SeriesSet, error) {
|
func (q *blockQuerier) Select(p *storage.SelectParams, ms ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
|
||||||
base, err := LookupChunkSeries(q.index, q.tombstones, ms...)
|
base, err := LookupChunkSeries(q.index, q.tombstones, ms...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
mint := q.mint
|
||||||
|
maxt := q.maxt
|
||||||
|
if p != nil {
|
||||||
|
mint = p.Start
|
||||||
|
maxt = p.End
|
||||||
}
|
}
|
||||||
return &blockSeriesSet{
|
return &blockSeriesSet{
|
||||||
set: &populatedChunkSeries{
|
set: &populatedChunkSeries{
|
||||||
set: base,
|
set: base,
|
||||||
chunks: q.chunks,
|
chunks: q.chunks,
|
||||||
mint: q.mint,
|
mint: mint,
|
||||||
maxt: q.maxt,
|
maxt: maxt,
|
||||||
},
|
},
|
||||||
|
|
||||||
mint: q.mint,
|
mint: mint,
|
||||||
maxt: q.maxt,
|
maxt: maxt,
|
||||||
}, nil
|
}, nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *blockQuerier) SelectSorted(ms ...*labels.Matcher) (SeriesSet, error) {
|
func (q *blockQuerier) SelectSorted(p *storage.SelectParams, ms ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
|
||||||
base, err := LookupChunkSeriesSorted(q.index, q.tombstones, ms...)
|
base, err := LookupChunkSeriesSorted(q.index, q.tombstones, ms...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
mint := q.mint
|
||||||
|
maxt := q.maxt
|
||||||
|
if p != nil {
|
||||||
|
mint = p.Start
|
||||||
|
maxt = p.End
|
||||||
}
|
}
|
||||||
return &blockSeriesSet{
|
return &blockSeriesSet{
|
||||||
set: &populatedChunkSeries{
|
set: &populatedChunkSeries{
|
||||||
set: base,
|
set: base,
|
||||||
chunks: q.chunks,
|
chunks: q.chunks,
|
||||||
mint: q.mint,
|
mint: mint,
|
||||||
maxt: q.maxt,
|
maxt: maxt,
|
||||||
},
|
},
|
||||||
|
|
||||||
mint: q.mint,
|
mint: mint,
|
||||||
maxt: q.maxt,
|
maxt: maxt,
|
||||||
}, nil
|
}, nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *blockQuerier) LabelValues(name string) ([]string, error) {
|
func (q *blockQuerier) LabelValues(name string) ([]string, storage.Warnings, error) {
|
||||||
return q.index.LabelValues(name)
|
res, err := q.index.LabelValues(name)
|
||||||
|
return res, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *blockQuerier) LabelNames() ([]string, error) {
|
func (q *blockQuerier) LabelNames() ([]string, storage.Warnings, error) {
|
||||||
return q.index.LabelNames()
|
res, err := q.index.LabelNames()
|
||||||
}
|
return res, nil, err
|
||||||
|
|
||||||
func (q *blockQuerier) LabelValuesFor(string, labels.Label) ([]string, error) {
|
|
||||||
return nil, fmt.Errorf("not implemented")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *blockQuerier) Close() error {
|
func (q *blockQuerier) Close() error {
|
||||||
|
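The new `*storage.SelectParams` argument lets the query engine narrow the time range below the block querier's own bounds. The rule both Select methods apply above can be isolated in a few lines; this is a sketch of that rule only, with a hypothetical helper name:

```go
package tsdb

import "github.com/prometheus/prometheus/storage"

// effectiveRange sketches the clamping rule used by blockQuerier.Select
// and SelectSorted: the querier's own mint/maxt are the default, and a
// non-nil SelectParams overrides them with the engine's hints.
func effectiveRange(qMint, qMaxt int64, p *storage.SelectParams) (int64, int64) {
	mint, maxt := qMint, qMaxt
	if p != nil {
		mint, maxt = p.Start, p.End
	}
	return mint, maxt
}
```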
@@ -495,32 +489,20 @@ func mergeStrings(a, b []string) []string {
     return res
 }
 
-// SeriesSet contains a set of series.
-type SeriesSet interface {
-    Next() bool
-    At() Series
-    Err() error
-}
-
-var emptySeriesSet = errSeriesSet{}
-
-// EmptySeriesSet returns a series set that's always empty.
-func EmptySeriesSet() SeriesSet {
-    return emptySeriesSet
-}
-
 // mergedSeriesSet returns a series sets slice as a single series set. The input series sets
 // must be sorted and sequential in time.
+// TODO(bwplotka): Merge this with merge SeriesSet available in storage package.
 type mergedSeriesSet struct {
-    all  []Series Set
+    all  []storage.SeriesSet
-    buf  []SeriesSet // A buffer for keeping the order of SeriesSet slice during forwarding the SeriesSet.
+    buf  []storage.SeriesSet // A buffer for keeping the order of SeriesSet slice during forwarding the SeriesSet.
     ids  []int // The indices of chosen SeriesSet for the current run.
     done bool
     err  error
-    cur  Series
+    cur  storage.Series
 }
 
-func NewMergedSeriesSet(all []SeriesSet) SeriesSet {
+// TODO(bwplotka): Merge this with merge SeriesSet available in storage package.
+func NewMergedSeriesSet(all []storage.SeriesSet) storage.SeriesSet {
     if len(all) == 1 {
         return all[0]
     }
@@ -535,7 +517,7 @@ func NewMergedSeriesSet(all []SeriesSet) SeriesSet {
     return s
 }
 
-func (s *mergedSeriesSet) At() Series {
+func (s *mergedSeriesSet) At() storage.Series {
     return s.cur
 }
 
@@ -620,7 +602,7 @@ func (s *mergedSeriesSet) Next() bool {
     }
 
     if len(s.ids) > 1 {
-        series := make([]Series, len(s.ids))
+        series := make([]storage.Series, len(s.ids))
         for i, idx := range s.ids {
             series[i] = s.all[idx].At()
         }
@@ -632,19 +614,19 @@ func (s *mergedSeriesSet) Next() bool {
 }
 
 type mergedVerticalSeriesSet struct {
-    a, b         SeriesSet
-    cur          Series
+    a, b         storage.SeriesSet
+    cur          storage.Series
     adone, bdone bool
 }
 
 // NewMergedVerticalSeriesSet takes two series sets as a single series set.
 // The input series sets must be sorted and
 // the time ranges of the series can be overlapping.
-func NewMergedVerticalSeriesSet(a, b SeriesSet) SeriesSet {
+func NewMergedVerticalSeriesSet(a, b storage.SeriesSet) storage.SeriesSet {
     return newMergedVerticalSeriesSet(a, b)
 }
 
-func newMergedVerticalSeriesSet(a, b SeriesSet) *mergedVerticalSeriesSet {
+func newMergedVerticalSeriesSet(a, b storage.SeriesSet) *mergedVerticalSeriesSet {
     s := &mergedVerticalSeriesSet{a: a, b: b}
     // Initialize first elements of both sets as Next() needs
     // one element look-ahead.
@@ -654,7 +636,7 @@ func newMergedVerticalSeriesSet(a, b SeriesSet) *mergedVerticalSeriesSet {
     return s
 }
 
-func (s *mergedVerticalSeriesSet) At() Series {
+func (s *mergedVerticalSeriesSet) At() storage.Series {
     return s.cur
 }
 
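The vertical merge is what makes the overlapping-block queries in the tests above work: two sorted, possibly time-overlapping sets come back as one set, and a series present in both sides is chained so its samples are merged by timestamp. A consumption sketch (the function name `drainOverlapping` is an assumption):

```go
package tsdb

import "github.com/prometheus/prometheus/storage"

// drainOverlapping sketches consuming a vertically merged set built from
// two sorted, possibly time-overlapping inputs, e.g. from two
// overlapping blocks.
func drainOverlapping(a, b storage.SeriesSet) error {
	merged := NewMergedVerticalSeriesSet(a, b)
	for merged.Next() {
		it := merged.At().Iterator() // duplicates arrive as one chained iterator
		for it.Next() {
			t, v := it.At()
			_, _ = t, v
		}
	}
	return merged.Err()
}
```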
@@ -690,21 +672,13 @@ func (s *mergedVerticalSeriesSet) Next() bool {
         s.cur = s.a.At()
         s.adone = !s.a.Next()
     } else {
-        s.cur = &verticalChainedSeries{series: []Series{s.a.At(), s.b.At()}}
+        s.cur = &verticalChainedSeries{series: []storage.Series{s.a.At(), s.b.At()}}
         s.adone = !s.a.Next()
         s.bdone = !s.b.Next()
     }
     return true
 }
 
-// ChunkSeriesSet exposes the chunks and intervals of a series instead of the
-// actual series itself.
-type ChunkSeriesSet interface {
-    Next() bool
-    At() (labels.Labels, []chunks.Meta, tombstones.Intervals)
-    Err() error
-}
-
 // baseChunkSeries loads the label set and chunk references for a postings
 // list from an index. It filters out series that have labels set that should be unset.
 type baseChunkSeries struct {
@@ -720,17 +694,17 @@ type baseChunkSeries struct {
 
 // LookupChunkSeries retrieves all series for the given matchers and returns a ChunkSeriesSet
 // over them. It drops chunks based on tombstones in the given reader.
-func LookupChunkSeries(ir IndexReader, tr tombstones.Reader, ms ...*labels.Matcher) (ChunkSeriesSet, error) {
+func LookupChunkSeries(ir IndexReader, tr tombstones.Reader, ms ...*labels.Matcher) (storage.ChunkSeriesSet, error) {
     return lookupChunkSeries(false, ir, tr, ms...)
 }
 
 // LookupChunkSeries retrieves all series for the given matchers and returns a ChunkSeriesSet
 // over them. It drops chunks based on tombstones in the given reader. Series will be in order.
-func LookupChunkSeriesSorted(ir IndexReader, tr tombstones.Reader, ms ...*labels.Matcher) (ChunkSeriesSet, error) {
+func LookupChunkSeriesSorted(ir IndexReader, tr tombstones.Reader, ms ...*labels.Matcher) (storage.ChunkSeriesSet, error) {
     return lookupChunkSeries(true, ir, tr, ms...)
 }
 
-func lookupChunkSeries(sorted bool, ir IndexReader, tr tombstones.Reader, ms ...*labels.Matcher) (ChunkSeriesSet, error) {
+func lookupChunkSeries(sorted bool, ir IndexReader, tr tombstones.Reader, ms ...*labels.Matcher) (storage.ChunkSeriesSet, error) {
     if tr == nil {
         tr = tombstones.NewMemTombstones()
     }
@@ -804,7 +778,7 @@ func (s *baseChunkSeries) Next() bool {
 // with known chunk references. It filters out chunks that do not fit the
 // given time range.
 type populatedChunkSeries struct {
-    set        ChunkSeriesSet
+    set        storage.ChunkSeriesSet
     chunks     ChunkReader
     mint, maxt int64
 
@@ -872,9 +846,9 @@ func (s *populatedChunkSeries) Next() bool {
 
 // blockSeriesSet is a set of series from an inverted index query.
 type blockSeriesSet struct {
-    set ChunkSeriesSet
+    set storage.ChunkSeriesSet
     err error
-    cur Series
+    cur storage.Series
 
     mint, maxt int64
 }
@@ -898,8 +872,8 @@ func (s *blockSeriesSet) Next() bool {
     return false
 }
 
-func (s *blockSeriesSet) At() Series { return s.cur }
+func (s *blockSeriesSet) At() storage.Series { return s.cur }
 func (s *blockSeriesSet) Err() error { return s.err }
 
 // chunkSeries is a series that is backed by a sequence of chunks holding
 // time series data.
@@ -916,48 +890,34 @@ func (s *chunkSeries) Labels() labels.Labels {
     return s.labels
 }
 
-func (s *chunkSeries) Iterator() SeriesIterator {
+func (s *chunkSeries) Iterator() chunkenc.Iterator {
     return newChunkSeriesIterator(s.chunks, s.intervals, s.mint, s.maxt)
 }
 
-// SeriesIterator iterates over the data of a time series.
-type SeriesIterator interface {
-    // Seek advances the iterator forward to the given timestamp.
-    // If there's no value exactly at t, it advances to the first value
-    // after t.
-    Seek(t int64) bool
-    // At returns the current timestamp/value pair.
-    At() (t int64, v float64)
-    // Next advances the iterator by one.
-    Next() bool
-    // Err returns the current error.
-    Err() error
-}
-
 // chainedSeries implements a series for a list of time-sorted series.
 // They all must have the same labels.
 type chainedSeries struct {
-    series []Series
+    series []storage.Series
 }
 
 func (s *chainedSeries) Labels() labels.Labels {
     return s.series[0].Labels()
 }
 
-func (s *chainedSeries) Iterator() SeriesIterator {
+func (s *chainedSeries) Iterator() chunkenc.Iterator {
     return newChainedSeriesIterator(s.series...)
 }
 
 // chainedSeriesIterator implements a series iterator over a list
 // of time-sorted, non-overlapping iterators.
 type chainedSeriesIterator struct {
-    series []Series // series in time order
+    series []storage.Series // series in time order
 
     i   int
-    cur SeriesIterator
+    cur chunkenc.Iterator
 }
 
-func newChainedSeriesIterator(s ...Series) *chainedSeriesIterator {
+func newChainedSeriesIterator(s ...storage.Series) *chainedSeriesIterator {
     return &chainedSeriesIterator{
         series: s,
         i:      0,
@@ -1008,28 +968,28 @@ func (it *chainedSeriesIterator) Err() error {
 // verticalChainedSeries implements a series for a list of time-sorted, time-overlapping series.
 // They all must have the same labels.
 type verticalChainedSeries struct {
-    series []Series
+    series []storage.Series
 }
 
 func (s *verticalChainedSeries) Labels() labels.Labels {
     return s.series[0].Labels()
 }
 
-func (s *verticalChainedSeries) Iterator() SeriesIterator {
+func (s *verticalChainedSeries) Iterator() chunkenc.Iterator {
     return newVerticalMergeSeriesIterator(s.series...)
 }
 
 // verticalMergeSeriesIterator implements a series iterator over a list
 // of time-sorted, time-overlapping iterators.
 type verticalMergeSeriesIterator struct {
-    a, b                  SeriesIterator
+    a, b                  chunkenc.Iterator
     aok, bok, initialized bool
 
     curT int64
     curV float64
 }
 
-func newVerticalMergeSeriesIterator(s ...Series) SeriesIterator {
+func newVerticalMergeSeriesIterator(s ...storage.Series) chunkenc.Iterator {
     if len(s) == 1 {
         return s[0].Iterator()
     } else if len(s) == 2 {
@@ -1219,6 +1179,13 @@ func (it *deletedIterator) At() (int64, float64) {
     return it.it.At()
 }
 
+func (it *deletedIterator) Seek(t int64) bool {
+    if it.it.Err() != nil {
+        return false
+    }
+    return it.it.Seek(t)
+}
+
 func (it *deletedIterator) Next() bool {
 Outer:
     for it.it.Next() {
@@ -1229,28 +1196,15 @@ Outer:
                 continue Outer
             }
 
-            if ts > tr.Maxt {
-                it.intervals = it.intervals[1:]
-                continue
+            if ts <= tr.Maxt {
+                return true
             }
-            return true
+            it.intervals = it.intervals[1:]
         }
 
         return true
     }
 
     return false
 }
 
-func (it *deletedIterator) Err() error {
-    return it.it.Err()
-}
-
-type errSeriesSet struct {
-    err error
-}
-
-func (s errSeriesSet) Next() bool { return false }
-func (s errSeriesSet) At() Series { return nil }
-func (s errSeriesSet) Err() error { return s.err }
+func (it *deletedIterator) Err() error { return it.it.Err() }
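The new `deletedIterator.Seek` above delegates to the wrapped iterator after checking for a pre-existing error, which is the usual `chunkenc.Iterator` contract: Seek positions the iterator at the first sample at or after `t` and reports false once exhausted or failed. A usage sketch against that interface (the helper name `seekThenScan` is an assumption):

```go
package tsdb

import "github.com/prometheus/prometheus/tsdb/chunkenc"

// seekThenScan sketches the Seek/Next contract: Seek once, then read the
// current sample and keep advancing with Next until exhaustion.
func seekThenScan(it chunkenc.Iterator, t int64) ([]int64, error) {
	var ts []int64
	if it.Seek(t) {
		cur, _ := it.At()
		ts = append(ts, cur)
		for it.Next() {
			cur, _ = it.At()
			ts = append(ts, cur)
		}
	}
	return ts, it.Err()
}
```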
tsdb/querier_bench_test.go

@@ -21,6 +21,7 @@ import (
     "testing"
 
     "github.com/prometheus/prometheus/pkg/labels"
+    "github.com/prometheus/prometheus/storage"
     "github.com/prometheus/prometheus/util/testutil"
 )
 
@@ -145,11 +146,11 @@ func BenchmarkQuerierSelect(b *testing.B) {
 
         b.ResetTimer()
         for i := 0; i < b.N; i++ {
-            var ss SeriesSet
+            var ss storage.SeriesSet
             if sorted {
-                ss, err = q.SelectSorted(matcher)
+                ss, _, err = q.SelectSorted(nil, matcher)
             } else {
-                ss, err = q.Select(matcher)
+                ss, _, err = q.Select(nil, matcher)
            }
             testutil.Ok(b, err)
             for ss.Next() {
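Note how the benchmark discards the warnings return with the blank identifier rather than asserting on it; that is the idiomatic move when warnings are irrelevant to what is being measured. A minimal sketch of that pattern outside the benchmark (the helper name `drainSeries` is an assumption):

```go
package tsdb

import (
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/storage"
)

// drainSeries sketches a hot loop that intentionally ignores warnings
// with the blank identifier and only propagates hard errors.
func drainSeries(q storage.Querier, m *labels.Matcher) error {
	ss, _, err := q.Select(nil, m)
	if err != nil {
		return err
	}
	for ss.Next() {
		// consume and discard
	}
	return ss.Err()
}
```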
@ -14,6 +14,7 @@
|
||||||
package tsdb
|
package tsdb
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"math"
|
"math"
|
||||||
|
@ -26,6 +27,7 @@ import (
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/prometheus/prometheus/pkg/labels"
|
"github.com/prometheus/prometheus/pkg/labels"
|
||||||
|
"github.com/prometheus/prometheus/storage"
|
||||||
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
||||||
"github.com/prometheus/prometheus/tsdb/chunks"
|
"github.com/prometheus/prometheus/tsdb/chunks"
|
||||||
"github.com/prometheus/prometheus/tsdb/index"
|
"github.com/prometheus/prometheus/tsdb/index"
|
||||||
|
@ -36,22 +38,22 @@ import (
|
||||||
|
|
||||||
type mockSeriesSet struct {
|
type mockSeriesSet struct {
|
||||||
next func() bool
|
next func() bool
|
||||||
series func() Series
|
series func() storage.Series
|
||||||
err func() error
|
err func() error
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *mockSeriesSet) Next() bool { return m.next() }
|
func (m *mockSeriesSet) Next() bool { return m.next() }
|
||||||
func (m *mockSeriesSet) At() Series { return m.series() }
|
func (m *mockSeriesSet) At() storage.Series { return m.series() }
|
||||||
func (m *mockSeriesSet) Err() error { return m.err() }
|
func (m *mockSeriesSet) Err() error { return m.err() }
|
||||||
|
|
||||||
func newMockSeriesSet(list []Series) *mockSeriesSet {
|
func newMockSeriesSet(list []storage.Series) *mockSeriesSet {
|
||||||
i := -1
|
i := -1
|
||||||
return &mockSeriesSet{
|
return &mockSeriesSet{
|
||||||
next: func() bool {
|
next: func() bool {
|
||||||
i++
|
i++
|
||||||
return i < len(list)
|
return i < len(list)
|
||||||
},
|
},
|
||||||
series: func() Series {
|
series: func() storage.Series {
|
||||||
return list[i]
|
return list[i]
|
||||||
},
|
},
|
||||||
err: func() error { return nil },
|
err: func() error { return nil },
|
||||||
|
@ -63,20 +65,20 @@ func TestMergedSeriesSet(t *testing.T) {
|
||||||
cases := []struct {
|
cases := []struct {
|
||||||
// The input sets in order (samples in series in b are strictly
|
// The input sets in order (samples in series in b are strictly
|
||||||
// after those in a).
|
// after those in a).
|
||||||
a, b SeriesSet
|
a, b storage.SeriesSet
|
||||||
// The composition of a and b in the partition series set must yield
|
// The composition of a and b in the partition series set must yield
|
||||||
// results equivalent to the result series set.
|
// results equivalent to the result series set.
|
||||||
exp SeriesSet
|
exp storage.SeriesSet
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
a: newMockSeriesSet([]Series{
|
a: newMockSeriesSet([]storage.Series{
|
||||||
newSeries(map[string]string{
|
newSeries(map[string]string{
|
||||||
"a": "a",
|
"a": "a",
|
||||||
}, []tsdbutil.Sample{
|
}, []tsdbutil.Sample{
|
||||||
sample{t: 1, v: 1},
|
sample{t: 1, v: 1},
|
||||||
}),
|
}),
|
||||||
}),
|
}),
|
||||||
b: newMockSeriesSet([]Series{
|
b: newMockSeriesSet([]storage.Series{
|
||||||
newSeries(map[string]string{
|
newSeries(map[string]string{
|
||||||
"a": "a",
|
"a": "a",
|
||||||
}, []tsdbutil.Sample{
|
}, []tsdbutil.Sample{
|
||||||
|
@ -88,7 +90,7 @@ func TestMergedSeriesSet(t *testing.T) {
|
||||||
sample{t: 1, v: 1},
|
sample{t: 1, v: 1},
|
||||||
}),
|
}),
|
||||||
}),
|
}),
|
||||||
exp: newMockSeriesSet([]Series{
|
exp: newMockSeriesSet([]storage.Series{
|
||||||
newSeries(map[string]string{
|
newSeries(map[string]string{
|
||||||
"a": "a",
|
"a": "a",
|
||||||
}, []tsdbutil.Sample{
|
}, []tsdbutil.Sample{
|
||||||
|
@ -103,7 +105,7 @@ func TestMergedSeriesSet(t *testing.T) {
|
||||||
}),
|
}),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
a: newMockSeriesSet([]Series{
|
a: newMockSeriesSet([]storage.Series{
|
||||||
newSeries(map[string]string{
|
newSeries(map[string]string{
|
||||||
"handler": "prometheus",
|
"handler": "prometheus",
|
||||||
"instance": "127.0.0.1:9090",
|
"instance": "127.0.0.1:9090",
|
||||||
|
@ -117,7 +119,7 @@ func TestMergedSeriesSet(t *testing.T) {
|
||||||
sample{t: 1, v: 2},
|
sample{t: 1, v: 2},
|
||||||
}),
|
}),
|
||||||
}),
|
}),
|
||||||
b: newMockSeriesSet([]Series{
|
b: newMockSeriesSet([]storage.Series{
|
||||||
newSeries(map[string]string{
|
newSeries(map[string]string{
|
||||||
"handler": "prometheus",
|
"handler": "prometheus",
|
||||||
"instance": "127.0.0.1:9090",
|
"instance": "127.0.0.1:9090",
|
||||||
|
@ -131,7 +133,7 @@ func TestMergedSeriesSet(t *testing.T) {
|
||||||
sample{t: 2, v: 2},
|
sample{t: 2, v: 2},
|
||||||
}),
|
}),
|
||||||
}),
|
}),
|
||||||
exp: newMockSeriesSet([]Series{
|
exp: newMockSeriesSet([]storage.Series{
|
||||||
newSeries(map[string]string{
|
newSeries(map[string]string{
|
||||||
"handler": "prometheus",
|
"handler": "prometheus",
|
||||||
"instance": "127.0.0.1:9090",
|
"instance": "127.0.0.1:9090",
|
||||||
|
@ -157,7 +159,7 @@ func TestMergedSeriesSet(t *testing.T) {
|
||||||
|
|
||||||
Outer:
|
Outer:
|
||||||
for _, c := range cases {
|
for _, c := range cases {
|
||||||
res := NewMergedSeriesSet([]SeriesSet{c.a, c.b})
|
res := NewMergedSeriesSet([]storage.SeriesSet{c.a, c.b})
|
||||||
|
|
||||||
for {
|
for {
|
||||||
eok, rok := c.exp.Next(), res.Next()
|
eok, rok := c.exp.Next(), res.Next()
|
||||||
|
@ -180,7 +182,7 @@ Outer:
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func expandSeriesIterator(it SeriesIterator) (r []tsdbutil.Sample, err error) {
|
func expandSeriesIterator(it chunkenc.Iterator) (r []tsdbutil.Sample, err error) {
|
||||||
for it.Next() {
|
for it.Next() {
|
||||||
t, v := it.At()
|
t, v := it.At()
|
||||||
r = append(r, sample{t: t, v: v})
|
r = append(r, sample{t: t, v: v})
|
||||||
|
@ -258,17 +260,17 @@ func createIdxChkReaders(t *testing.T, tc []seriesSamples) (IndexReader, ChunkRe
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBlockQuerier(t *testing.T) {
|
func TestBlockQuerier(t *testing.T) {
|
||||||
newSeries := func(l map[string]string, s []tsdbutil.Sample) Series {
|
newSeries := func(l map[string]string, s []tsdbutil.Sample) storage.Series {
|
||||||
return &mockSeries{
|
return &mockSeries{
|
||||||
labels: func() labels.Labels { return labels.FromMap(l) },
|
labels: func() labels.Labels { return labels.FromMap(l) },
|
||||||
iterator: func() SeriesIterator { return newListSeriesIterator(s) },
|
iterator: func() chunkenc.Iterator { return newListSeriesIterator(s) },
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type query struct {
|
type query struct {
|
||||||
mint, maxt int64
|
mint, maxt int64
|
||||||
 		ms  []*labels.Matcher
-		exp SeriesSet
+		exp storage.SeriesSet
 	}

 	cases := struct {
@@ -324,25 +326,25 @@ func TestBlockQuerier(t *testing.T) {
 			mint: 0,
 			maxt: 0,
 			ms:   []*labels.Matcher{},
-			exp:  newMockSeriesSet([]Series{}),
+			exp:  newMockSeriesSet([]storage.Series{}),
 		},
 		{
 			mint: 0,
 			maxt: 0,
 			ms:   []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
-			exp:  newMockSeriesSet([]Series{}),
+			exp:  newMockSeriesSet([]storage.Series{}),
 		},
 		{
 			mint: 1,
 			maxt: 0,
 			ms:   []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
-			exp:  newMockSeriesSet([]Series{}),
+			exp:  newMockSeriesSet([]storage.Series{}),
 		},
 		{
 			mint: 2,
 			maxt: 6,
 			ms:   []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
-			exp: newMockSeriesSet([]Series{
+			exp: newMockSeriesSet([]storage.Series{
 				newSeries(map[string]string{
 					"a": "a",
 				},
@@ -371,8 +373,9 @@ Outer:
 			maxt: c.maxt,
 		}

-		res, err := querier.Select(c.ms...)
+		res, ws, err := querier.Select(nil, c.ms...)
 		testutil.Ok(t, err)
+		testutil.Equals(t, 0, len(ws))

 		for {
 			eok, rok := c.exp.Next(), res.Next()
@@ -396,17 +399,17 @@ Outer:
 }

 func TestBlockQuerierDelete(t *testing.T) {
-	newSeries := func(l map[string]string, s []tsdbutil.Sample) Series {
+	newSeries := func(l map[string]string, s []tsdbutil.Sample) storage.Series {
 		return &mockSeries{
 			labels:   func() labels.Labels { return labels.FromMap(l) },
-			iterator: func() SeriesIterator { return newListSeriesIterator(s) },
+			iterator: func() chunkenc.Iterator { return newListSeriesIterator(s) },
 		}
 	}

 	type query struct {
 		mint, maxt int64
 		ms         []*labels.Matcher
-		exp        SeriesSet
+		exp        storage.SeriesSet
 	}

 	cases := struct {
@@ -467,7 +470,7 @@ func TestBlockQuerierDelete(t *testing.T) {
 			mint: 2,
 			maxt: 7,
 			ms:   []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
-			exp: newMockSeriesSet([]Series{
+			exp: newMockSeriesSet([]storage.Series{
 				newSeries(map[string]string{
 					"a": "a",
 				},
@@ -485,7 +488,7 @@ func TestBlockQuerierDelete(t *testing.T) {
 			mint: 2,
 			maxt: 7,
 			ms:   []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "b", "b")},
-			exp: newMockSeriesSet([]Series{
+			exp: newMockSeriesSet([]storage.Series{
 				newSeries(map[string]string{
 					"a": "a",
 					"b": "b",
@@ -503,7 +506,7 @@ func TestBlockQuerierDelete(t *testing.T) {
 			mint: 1,
 			maxt: 4,
 			ms:   []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
-			exp: newMockSeriesSet([]Series{
+			exp: newMockSeriesSet([]storage.Series{
 				newSeries(map[string]string{
 					"a": "a",
 					"b": "b",
@@ -516,12 +519,11 @@ func TestBlockQuerierDelete(t *testing.T) {
 			mint: 1,
 			maxt: 3,
 			ms:   []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "a")},
-			exp:  newMockSeriesSet([]Series{}),
+			exp:  newMockSeriesSet([]storage.Series{}),
 		},
 		},
 	}

-	fmt.Println("tombstones", cases.tombstones)
 Outer:
 	for _, c := range cases.queries {
 		ir, cr, _, _ := createIdxChkReaders(t, cases.data)
@@ -534,8 +536,9 @@ Outer:
 			maxt: c.maxt,
 		}

-		res, err := querier.Select(c.ms...)
+		res, ws, err := querier.Select(nil, c.ms...)
 		testutil.Ok(t, err)
+		testutil.Equals(t, 0, len(ws))

 		for {
 			eok, rok := c.exp.Next(), res.Next()
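
The recurring change in these hunks is the `Querier.Select` signature: it now takes select params (here `nil`) up front and returns warnings next to the series set. A minimal sketch of a caller handling the three return values, assuming the usual `fmt`, `labels`, and `storage` imports; the helper name is made up:

// consumeSelect is a hypothetical helper showing the new Select shape.
func consumeSelect(q storage.Querier, ms ...*labels.Matcher) error {
	ss, ws, err := q.Select(nil, ms...)
	if err != nil {
		return err
	}
	for _, w := range ws {
		// Warnings are non-fatal; the tests above simply assert len(ws) == 0.
		fmt.Println("select warning:", w)
	}
	for ss.Next() {
		_ = ss.At().Labels() // samples come from ss.At().Iterator()
	}
	return ss.Err()
}
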
@@ -654,13 +657,12 @@ func TestBaseChunkSeries(t *testing.T) {
 	}
 }

-// TODO: Remove after simpleSeries is merged
 type itSeries struct {
-	si SeriesIterator
+	si chunkenc.Iterator
 }

-func (s itSeries) Iterator() SeriesIterator { return s.si }
+func (s itSeries) Iterator() chunkenc.Iterator { return s.si }
 func (s itSeries) Labels() labels.Labels { return labels.Labels{} }

 func TestSeriesIterator(t *testing.T) {
 	itcases := []struct {
@@ -1003,7 +1005,7 @@ func TestSeriesIterator(t *testing.T) {

 	t.Run("Seek", func(t *testing.T) {
 		for _, tc := range seekcases {
-			ress := []SeriesIterator{
+			ress := []chunkenc.Iterator{
 				newChainedSeriesIterator(
 					itSeries{newListSeriesIterator(tc.a)},
 					itSeries{newListSeriesIterator(tc.b)},
@@ -1167,8 +1169,9 @@ func (m *mockChunkSeriesSet) Err() error {

 // Test the cost of merging series sets for different number of merged sets and their size.
 // The subset are all equivalent so this does not capture merging of partial or non-overlapping sets well.
+// TODO(bwplotka): Merge with storage merged series set benchmark.
 func BenchmarkMergedSeriesSet(b *testing.B) {
-	var sel = func(sets []SeriesSet) SeriesSet {
+	var sel = func(sets []storage.SeriesSet) storage.SeriesSet {
 		return NewMergedSeriesSet(sets)
 	}

@@ -1185,7 +1188,7 @@ func BenchmarkMergedSeriesSet(b *testing.B) {

 			sort.Sort(labels.Slice(lbls))

-			in := make([][]Series, j)
+			in := make([][]storage.Series, j)

 			for _, l := range lbls {
 				l2 := l
@@ -1197,7 +1200,7 @@ func BenchmarkMergedSeriesSet(b *testing.B) {
 			b.ResetTimer()

 			for i := 0; i < b.N; i++ {
-				var sets []SeriesSet
+				var sets []storage.SeriesSet
 				for _, s := range in {
 					sets = append(sets, newMockSeriesSet(s))
 				}
@@ -1257,6 +1260,103 @@ func TestDeletedIterator(t *testing.T) {
 		{r: tombstones.Intervals{{Mint: 1000, Maxt: 20000}}},
 	}

+	for _, c := range cases {
+		t.Run("Simple", func(t *testing.T) {
+			i := int64(-1)
+			it := &deletedIterator{it: chk.Iterator(nil), intervals: c.r[:]}
+			ranges := c.r[:]
+			for it.Next() {
+				i++
+				for _, tr := range ranges {
+					if tr.InBounds(i) {
+						i = tr.Maxt + 1
+						ranges = ranges[1:]
+					}
+				}
+
+				testutil.Assert(t, i < 1000, "")
+
+				ts, v := it.At()
+				testutil.Equals(t, act[i].t, ts)
+				testutil.Equals(t, act[i].v, v)
+			}
+			// There has been an extra call to Next().
+			i++
+			for _, tr := range ranges {
+				if tr.InBounds(i) {
+					i = tr.Maxt + 1
+					ranges = ranges[1:]
+				}
+			}
+
+			testutil.Assert(t, i >= 1000, "")
+			testutil.Ok(t, it.Err())
+		})
+		t.Run("Seek", func(t *testing.T) {
+			const seek = 10
+
+			i := int64(seek)
+			it := &deletedIterator{it: chk.Iterator(nil), intervals: c.r[:]}
+			ranges := c.r[:]
+
+			testutil.Assert(t, it.Seek(seek), "")
+			for it.Next() {
+				i++
+				for _, tr := range ranges {
+					if tr.InBounds(i) {
+						i = tr.Maxt + 1
+						ranges = ranges[1:]
+					}
+				}
+
+				testutil.Assert(t, i < 1000, "")
+
+				ts, v := it.At()
+				testutil.Equals(t, act[i].t, ts)
+				testutil.Equals(t, act[i].v, v)
+			}
+			// There has been an extra call to Next().
+			i++
+			for _, tr := range ranges {
+				if tr.InBounds(i) {
+					i = tr.Maxt + 1
+					ranges = ranges[1:]
+				}
+			}
+
+			testutil.Assert(t, i >= 1000, "")
+			testutil.Ok(t, it.Err())
+		})
+	}
+}
+
+func TestDeletedIterator_WithSeek(t *testing.T) {
+	chk := chunkenc.NewXORChunk()
+	app, err := chk.Appender()
+	testutil.Ok(t, err)
+	// Insert random stuff from (0, 1000).
+	act := make([]sample, 1000)
+	for i := 0; i < 1000; i++ {
+		act[i].t = int64(i)
+		act[i].v = rand.Float64()
+		app.Append(act[i].t, act[i].v)
+	}
+
+	cases := []struct {
+		r tombstones.Intervals
+	}{
+		{r: tombstones.Intervals{{Mint: 1, Maxt: 20}}},
+		{r: tombstones.Intervals{{Mint: 1, Maxt: 10}, {Mint: 12, Maxt: 20}, {Mint: 21, Maxt: 23}, {Mint: 25, Maxt: 30}}},
+		{r: tombstones.Intervals{{Mint: 1, Maxt: 10}, {Mint: 12, Maxt: 20}, {Mint: 20, Maxt: 30}}},
+		{r: tombstones.Intervals{{Mint: 1, Maxt: 10}, {Mint: 12, Maxt: 23}, {Mint: 25, Maxt: 30}}},
+		{r: tombstones.Intervals{{Mint: 1, Maxt: 23}, {Mint: 12, Maxt: 20}, {Mint: 25, Maxt: 30}}},
+		{r: tombstones.Intervals{{Mint: 1, Maxt: 23}, {Mint: 12, Maxt: 20}, {Mint: 25, Maxt: 3000}}},
+		{r: tombstones.Intervals{{Mint: 0, Maxt: 2000}}},
+		{r: tombstones.Intervals{{Mint: 500, Maxt: 2000}}},
+		{r: tombstones.Intervals{{Mint: 0, Maxt: 200}}},
+		{r: tombstones.Intervals{{Mint: 1000, Maxt: 20000}}},
+	}

 	for _, c := range cases {
 		i := int64(-1)
 		it := &deletedIterator{it: chk.Iterator(nil), intervals: c.r[:]}
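
The added test bodies all share one rule for tracking the expected sample index: whenever `i` lands inside a tombstoned interval, it jumps past that interval's `Maxt` and drops the consumed range. Pulled out on its own (a sketch, not code from the diff; `tombstones.Intervals` as imported above):

// advance moves the expected index past any tombstoned interval it falls
// into, consuming intervals from the front, exactly as the tests above do.
func advance(i int64, ranges tombstones.Intervals) (int64, tombstones.Intervals) {
	for _, tr := range ranges {
		if tr.InBounds(i) {
			i = tr.Maxt + 1
			ranges = ranges[1:]
		}
	}
	return i, ranges
}
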
@@ -1414,17 +1514,17 @@ func (m mockIndex) LabelNames() ([]string, error) {

 type mockSeries struct {
 	labels   func() labels.Labels
-	iterator func() SeriesIterator
+	iterator func() chunkenc.Iterator
 }

-func newSeries(l map[string]string, s []tsdbutil.Sample) Series {
+func newSeries(l map[string]string, s []tsdbutil.Sample) storage.Series {
 	return &mockSeries{
 		labels:   func() labels.Labels { return labels.FromMap(l) },
-		iterator: func() SeriesIterator { return newListSeriesIterator(s) },
+		iterator: func() chunkenc.Iterator { return newListSeriesIterator(s) },
 	}
 }
 func (m *mockSeries) Labels() labels.Labels { return m.labels() }
-func (m *mockSeries) Iterator() SeriesIterator { return m.iterator() }
+func (m *mockSeries) Iterator() chunkenc.Iterator { return m.iterator() }

 type listSeriesIterator struct {
 	list []tsdbutil.Sample
@@ -1493,7 +1593,7 @@ func BenchmarkQueryIterator(b *testing.B) {
 				blocks          []*Block
 				overlapDelta    = int64(overlapPercentage * c.numSamplesPerSeriesPerBlock / 100)
 				prefilledLabels []map[string]string
-				generatedSeries []Series
+				generatedSeries []storage.Series
 			)
 			for i := int64(0); i < int64(c.numBlocks); i++ {
 				offset := i * overlapDelta
@@ -1514,7 +1614,7 @@ func BenchmarkQueryIterator(b *testing.B) {
 			}

 			que := &querier{
-				blocks: make([]Querier, 0, len(blocks)),
+				blocks: make([]storage.Querier, 0, len(blocks)),
 			}
 			for _, blk := range blocks {
 				q, err := NewBlockQuerier(blk, math.MinInt64, math.MaxInt64)
@@ -1522,7 +1622,7 @@ func BenchmarkQueryIterator(b *testing.B) {
 				que.blocks = append(que.blocks, q)
 			}

-			var sq Querier = que
+			var sq storage.Querier = que
 			if overlapPercentage > 0 {
 				sq = &verticalQuerier{
 					querier: *que,
@@ -1567,7 +1667,7 @@ func BenchmarkQuerySeek(b *testing.B) {
 				blocks          []*Block
 				overlapDelta    = int64(overlapPercentage * c.numSamplesPerSeriesPerBlock / 100)
 				prefilledLabels []map[string]string
-				generatedSeries []Series
+				generatedSeries []storage.Series
 			)
 			for i := int64(0); i < int64(c.numBlocks); i++ {
 				offset := i * overlapDelta
@@ -1588,7 +1688,7 @@ func BenchmarkQuerySeek(b *testing.B) {
 			}

 			que := &querier{
-				blocks: make([]Querier, 0, len(blocks)),
+				blocks: make([]storage.Querier, 0, len(blocks)),
 			}
 			for _, blk := range blocks {
 				q, err := NewBlockQuerier(blk, math.MinInt64, math.MaxInt64)
@@ -1596,7 +1696,7 @@ func BenchmarkQuerySeek(b *testing.B) {
 				que.blocks = append(que.blocks, q)
 			}

-			var sq Querier = que
+			var sq storage.Querier = que
 			if overlapPercentage > 0 {
 				sq = &verticalQuerier{
 					querier: *que,
@@ -1610,7 +1710,7 @@ func BenchmarkQuerySeek(b *testing.B) {
 			b.ResetTimer()
 			b.ReportAllocs()

-			ss, err := sq.Select(labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
+			ss, ws, err := sq.Select(nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
 			for ss.Next() {
 				it := ss.At().Iterator()
 				for t := mint; t <= maxt; t++ {
@@ -1620,6 +1720,7 @@ func BenchmarkQuerySeek(b *testing.B) {
 			}
 			testutil.Ok(b, ss.Err())
 			testutil.Ok(b, err)
+			testutil.Equals(b, 0, len(ws))
 		})
 	}
 }
@@ -1713,7 +1814,7 @@ func BenchmarkSetMatcher(b *testing.B) {
 		var (
 			blocks          []*Block
 			prefilledLabels []map[string]string
-			generatedSeries []Series
+			generatedSeries []storage.Series
 		)
 		for i := int64(0); i < int64(c.numBlocks); i++ {
 			mint := i * int64(c.numSamplesPerSeriesPerBlock)
@@ -1733,7 +1834,7 @@ func BenchmarkSetMatcher(b *testing.B) {
 		}

 		que := &querier{
-			blocks: make([]Querier, 0, len(blocks)),
+			blocks: make([]storage.Querier, 0, len(blocks)),
 		}
 		for _, blk := range blocks {
 			q, err := NewBlockQuerier(blk, math.MinInt64, math.MaxInt64)
@@ -1747,8 +1848,9 @@ func BenchmarkSetMatcher(b *testing.B) {
 		b.ResetTimer()
 		b.ReportAllocs()
 		for n := 0; n < b.N; n++ {
-			_, err := que.Select(labels.MustNewMatcher(labels.MatchRegexp, "test", c.pattern))
+			_, ws, err := que.Select(nil, labels.MustNewMatcher(labels.MatchRegexp, "test", c.pattern))
 			testutil.Ok(b, err)
+			testutil.Equals(b, 0, len(ws))
 		}
 	})
 }
@@ -2076,7 +2178,7 @@ func TestClose(t *testing.T) {
 	createBlock(t, dir, genSeries(1, 1, 0, 10))
 	createBlock(t, dir, genSeries(1, 1, 10, 20))

-	db, err := Open(dir, nil, nil, DefaultOptions)
+	db, err := Open(dir, nil, nil, DefaultOptions())
 	if err != nil {
 		t.Fatalf("Opening test storage failed: %s", err)
 	}
@@ -2084,7 +2186,7 @@ func TestClose(t *testing.T) {
 		testutil.Ok(t, db.Close())
 	}()

-	q, err := db.Querier(0, 20)
+	q, err := db.Querier(context.TODO(), 0, 20)
 	testutil.Ok(t, err)
 	testutil.Ok(t, q.Close())
 	testutil.NotOk(t, q.Close())
@@ -2119,7 +2221,7 @@ func BenchmarkQueries(b *testing.B) {
 		},
 	}

-	queryTypes := make(map[string]Querier)
+	queryTypes := make(map[string]storage.Querier)
 	defer func() {
 		for _, q := range queryTypes {
 			// Can't run a check for error here as some of these will fail as
@@ -2162,7 +2264,7 @@ func BenchmarkQueries(b *testing.B) {
 			}
 		}

-		qs := []Querier{}
+		qs := make([]storage.Querier, 0, 10)
 		for x := 0; x <= 10; x++ {
 			block, err := OpenBlock(nil, createBlock(b, dir, series), nil)
 			testutil.Ok(b, err)
@@ -2191,12 +2293,13 @@ func BenchmarkQueries(b *testing.B) {
 	}
 }

-func benchQuery(b *testing.B, expExpansions int, q Querier, selectors labels.Selector) {
+func benchQuery(b *testing.B, expExpansions int, q storage.Querier, selectors labels.Selector) {
 	b.ResetTimer()
 	b.ReportAllocs()
 	for i := 0; i < b.N; i++ {
-		ss, err := q.Select(selectors...)
+		ss, ws, err := q.Select(nil, selectors...)
 		testutil.Ok(b, err)
+		testutil.Equals(b, 0, len(ws))
 		var actualExpansions int
 		for ss.Next() {
 			s := ss.At()
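
Two smaller API shifts also show up above: `Open` now takes `DefaultOptions()` (a constructor, so each caller gets a fresh copy to mutate) instead of the old package-level `DefaultOptions` value, and `db.Querier` gains a leading `context.Context`. A hedged sketch of the combined flow, with `dir` and the time range as placeholders:

// Sketch of the updated open-and-query flow, assuming the tsdb package as used above.
func openAndQuery(dir string) error {
	db, err := tsdb.Open(dir, nil, nil, tsdb.DefaultOptions())
	if err != nil {
		return err
	}
	defer db.Close()
	q, err := db.Querier(context.TODO(), 0, 20) // mint, maxt in milliseconds
	if err != nil {
		return err
	}
	return q.Close()
}
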
@@ -65,7 +65,7 @@ func CreateBlock(samples []*MetricSample, dir string, mint, maxt int64, logger l
 		return "", err
 	}

-	compactor, err := NewLeveledCompactor(context.Background(), nil, logger, DefaultOptions.BlockRanges, nil)
+	compactor, err := NewLeveledCompactor(context.Background(), nil, logger, ExponentialBlockRanges(DefaultBlockDuration, 3, 5), nil)
 	if err != nil {
 		return "", err
 	}
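
With `DefaultOptions` now a function, the old `DefaultOptions.BlockRanges` field is gone, so `CreateBlock` derives the compaction ranges itself. Assuming `ExponentialBlockRanges(base, steps, factor)` keeps its usual behavior of multiplying the base range by the factor at each step, the replacement call produces three ranges of 2h, 10h, and 50h (in milliseconds). Restated from the hunk, inside `CreateBlock`:

// Sketch: geometric compaction ranges, as used in the replacement call above.
ranges := ExponentialBlockRanges(DefaultBlockDuration, 3, 5) // assumed [2h, 10h, 50h] in ms
compactor, err := NewLeveledCompactor(context.Background(), nil, logger, ranges, nil)
if err != nil {
	return "", err
}
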
@@ -18,9 +18,8 @@ import (
 	"os"
 	"time"

-	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/storage"
-	"github.com/prometheus/prometheus/storage/tsdb"
+	"github.com/prometheus/prometheus/tsdb"
 	"github.com/prometheus/prometheus/util/testutil"
 )

@@ -34,14 +33,14 @@ func New(t testutil.T) storage.Storage {

 	// Tests just load data for a series sequentially. Thus we
 	// need a long appendable window.
-	db, err := tsdb.Open(dir, nil, nil, &tsdb.Options{
-		MinBlockDuration: model.Duration(24 * time.Hour),
-		MaxBlockDuration: model.Duration(24 * time.Hour),
-	})
+	opts := tsdb.DefaultOptions()
+	opts.MinBlockDuration = int64(24 * time.Hour / time.Millisecond)
+	opts.MaxBlockDuration = int64(24 * time.Hour / time.Millisecond)
+	db, err := tsdb.Open(dir, nil, nil, opts)
 	if err != nil {
 		t.Fatalf("Opening test storage failed: %s", err)
 	}
-	return testStorage{Storage: tsdb.Adapter(db, int64(0)), dir: dir}
+	return testStorage{Storage: db, dir: dir}
 }

 type testStorage struct {
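
The helper above also shows the options change in miniature: block durations are no longer `model.Duration` values but plain `int64` milliseconds, so the 24-hour window needs an explicit unit conversion; `int64(24 * time.Hour / time.Millisecond)` evaluates to 86,400,000. A compact sketch of the same conversion:

// Sketch: tsdb options now take milliseconds as int64 rather than model.Duration.
opts := tsdb.DefaultOptions()
day := int64(24 * time.Hour / time.Millisecond) // 86400000
opts.MinBlockDuration = day
opts.MaxBlockDuration = day
db, err := tsdb.Open(dir, nil, nil, opts)
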
@@ -18,6 +18,7 @@ import (
 	"fmt"
 	"math"
 	"math/rand"
+	"net"
 	"net/http"
 	"net/url"
 	"os"
@@ -80,12 +81,15 @@ const (
 	errorNotFound errorType = "not_found"
 )

-var remoteReadQueries = prometheus.NewGauge(prometheus.GaugeOpts{
-	Namespace: namespace,
-	Subsystem: subsystem,
-	Name:      "remote_read_queries",
-	Help:      "The current number of remote read queries being executed or waiting.",
-})
+var (
+	LocalhostRepresentations = []string{"127.0.0.1", "localhost"}
+	remoteReadQueries        = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: namespace,
+		Subsystem: subsystem,
+		Name:      "remote_read_queries",
+		Help:      "The current number of remote read queries being executed or waiting.",
+	})
+)

 type apiError struct {
 	typ errorType
@@ -176,6 +180,7 @@ type API struct {
 	config   func() config.Config
 	flagsMap map[string]string
 	ready    func(http.HandlerFunc) http.HandlerFunc
+	globalURLOptions GlobalURLOptions

 	db          func() TSDBAdmin
 	enableAdmin bool
@@ -201,6 +206,7 @@ func NewAPI(
 	ar alertmanagerRetriever,
 	configFunc func() config.Config,
 	flagsMap map[string]string,
+	globalURLOptions GlobalURLOptions,
 	readyFunc func(http.HandlerFunc) http.HandlerFunc,
 	db func() TSDBAdmin,
 	enableAdmin bool,
@@ -223,6 +229,7 @@ func NewAPI(
 		config:   configFunc,
 		flagsMap: flagsMap,
 		ready:    readyFunc,
+		globalURLOptions: globalURLOptions,
 		db:          db,
 		enableAdmin: enableAdmin,
 		rulesRetriever: rr,
@@ -584,6 +591,7 @@ type Target struct {

 	ScrapePool string `json:"scrapePool"`
 	ScrapeURL  string `json:"scrapeUrl"`
+	GlobalURL  string `json:"globalUrl"`

 	LastError  string    `json:"lastError"`
 	LastScrape time.Time `json:"lastScrape"`
@@ -603,6 +611,54 @@ type TargetDiscovery struct {
 	DroppedTargets []*DroppedTarget `json:"droppedTargets"`
 }

+// GlobalURLOptions contains fields used for deriving the global URL for local targets.
+type GlobalURLOptions struct {
+	ListenAddress string
+	Host          string
+	Scheme        string
+}
+
+func getGlobalURL(u *url.URL, opts GlobalURLOptions) (*url.URL, error) {
+	host, port, err := net.SplitHostPort(u.Host)
+	if err != nil {
+		return u, err
+	}
+
+	for _, lhr := range LocalhostRepresentations {
+		if host == lhr {
+			_, ownPort, err := net.SplitHostPort(opts.ListenAddress)
+			if err != nil {
+				return u, err
+			}
+
+			if port == ownPort {
+				// Only in the case where the target is on localhost and its port is
+				// the same as the one we're listening on, we know for sure that
+				// we're monitoring our own process and that we need to change the
+				// scheme, hostname, and port to the externally reachable ones as
+				// well. We shouldn't need to touch the path at all, since if a
+				// path prefix is defined, the path under which we scrape ourselves
+				// should already contain the prefix.
+				u.Scheme = opts.Scheme
+				u.Host = opts.Host
+			} else {
+				// Otherwise, we only know that localhost is not reachable
+				// externally, so we replace only the hostname by the one in the
+				// external URL. It could be the wrong hostname for the service on
+				// this port, but it's still the best possible guess.
+				host, _, err := net.SplitHostPort(opts.Host)
+				if err != nil {
+					return u, err
+				}
+				u.Host = host + ":" + port
+			}
+			break
+		}
+	}
+
+	return u, nil
+}
+
 func (api *API) targets(r *http.Request) apiFuncResult {
 	sortKeys := func(targets map[string][]*scrape.Target) ([]string, int) {
 		var n int
@@ -642,12 +698,22 @@ func (api *API) targets(r *http.Request) apiFuncResult {
 				lastErrStr = lastErr.Error()
 			}

+			globalURL, err := getGlobalURL(target.URL(), api.globalURLOptions)
+
 			res.ActiveTargets = append(res.ActiveTargets, &Target{
 				DiscoveredLabels: target.DiscoveredLabels().Map(),
 				Labels:           target.Labels().Map(),
 				ScrapePool:       key,
 				ScrapeURL:        target.URL().String(),
-				LastError:        lastErrStr,
+				GlobalURL:        globalURL.String(),
+				LastError: func() string {
+					if err == nil && lastErrStr == "" {
+						return ""
+					} else if err != nil {
+						return errors.Wrapf(err, lastErrStr).Error()
+					}
+					return lastErrStr
+				}(),
 				LastScrape:         target.LastScrape(),
 				LastScrapeDuration: target.LastScrapeDuration().Seconds(),
 				Health:             target.Health(),
@@ -1124,6 +1190,7 @@ func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) {
 	for i, query := range req.Queries {
 		err := api.remoteReadQuery(ctx, query, externalLabels, func(querier storage.Querier, selectParams *storage.SelectParams, filteredMatchers []*labels.Matcher) error {
 			// The streaming API provides sorted series.
+			// TODO(bwplotka): Handle warnings via query log.
 			set, _, err := querier.SelectSorted(selectParams, filteredMatchers...)
 			if err != nil {
 				return err
@@ -1240,7 +1307,6 @@ func (api *API) remoteReadQuery(ctx context.Context, query *prompb.Query, extern
 			level.Warn(api.logger).Log("msg", "error on querier close", "err", err.Error())
 		}
 	}()
-
 	return seriesHandleFn(querier, selectParams, filteredMatchers)
 }
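
`getGlobalURL` only rewrites URLs whose host is one of the `LocalhostRepresentations`; any other host passes through unchanged, and the two branches differ in whether the target's port matches our own listen port. A usage sketch in the same package; the addresses below are made-up values:

// Sketch: a localhost target on our own port is rewritten to the external URL.
opts := GlobalURLOptions{
	ListenAddress: "0.0.0.0:9090",          // assumed bind address
	Host:          "prom.example.com:9090", // assumed external host
	Scheme:        "https",
}
u, _ := url.Parse("http://localhost:9090/metrics")
if global, err := getGlobalURL(u, opts); err == nil {
	fmt.Println(global) // https://prom.example.com:9090/metrics
}
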
@@ -827,8 +827,9 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, testLabelAPI
 					},
 					ScrapePool: "blackbox",
 					ScrapeURL:  "http://localhost:9115/probe?target=example.com",
+					GlobalURL:  "http://localhost:9115/probe?target=example.com",
 					Health:     "down",
-					LastError:  "failed",
+					LastError:  "failed: missing port in address",
 					LastScrape: scrapeStart,
 					LastScrapeDuration: 0.1,
 				},
@@ -839,6 +840,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, testLabelAPI
 					},
 					ScrapePool: "test",
 					ScrapeURL:  "http://example.com:8080/metrics",
+					GlobalURL:  "http://example.com:8080/metrics",
 					Health:     "up",
 					LastError:  "",
 					LastScrape: scrapeStart,
@@ -871,8 +873,9 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, testLabelAPI
 					},
 					ScrapePool: "blackbox",
 					ScrapeURL:  "http://localhost:9115/probe?target=example.com",
+					GlobalURL:  "http://localhost:9115/probe?target=example.com",
 					Health:     "down",
-					LastError:  "failed",
+					LastError:  "failed: missing port in address",
 					LastScrape: scrapeStart,
 					LastScrapeDuration: 0.1,
 				},
@@ -883,6 +886,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, testLabelAPI
 					},
 					ScrapePool: "test",
 					ScrapeURL:  "http://example.com:8080/metrics",
+					GlobalURL:  "http://example.com:8080/metrics",
 					Health:     "up",
 					LastError:  "",
 					LastScrape: scrapeStart,
@@ -915,8 +919,9 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, testLabelAPI
 					},
 					ScrapePool: "blackbox",
 					ScrapeURL:  "http://localhost:9115/probe?target=example.com",
+					GlobalURL:  "http://localhost:9115/probe?target=example.com",
 					Health:     "down",
-					LastError:  "failed",
+					LastError:  "failed: missing port in address",
 					LastScrape: scrapeStart,
 					LastScrapeDuration: 0.1,
 				},
@@ -927,6 +932,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, testLabelAPI
 					},
 					ScrapePool: "test",
 					ScrapeURL:  "http://example.com:8080/metrics",
+					GlobalURL:  "http://example.com:8080/metrics",
 					Health:     "up",
 					LastError:  "",
 					LastScrape: scrapeStart,
@@ -1708,10 +1714,20 @@ func TestStreamReadEndpoint(t *testing.T) {
 	matcher3, err := labels.NewMatcher(labels.MatchEqual, "foo", "bar1")
 	testutil.Ok(t, err)

-	query1, err := remote.ToQuery(0, 14400001, []*labels.Matcher{matcher1, matcher2}, &storage.SelectParams{Step: 0, Func: "avg"})
+	query1, err := remote.ToQuery(0, 14400001, []*labels.Matcher{matcher1, matcher2}, &storage.SelectParams{
+		Step:  1,
+		Func:  "avg",
+		Start: 0,
+		End:   14400001,
+	})
 	testutil.Ok(t, err)

-	query2, err := remote.ToQuery(0, 14400001, []*labels.Matcher{matcher1, matcher3}, &storage.SelectParams{Step: 0, Func: "avg"})
+	query2, err := remote.ToQuery(0, 14400001, []*labels.Matcher{matcher1, matcher3}, &storage.SelectParams{
+		Step:  1,
+		Func:  "avg",
+		Start: 0,
+		End:   14400001,
+	})
 	testutil.Ok(t, err)

 	req := &prompb.ReadRequest{
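
The changed `LastError` expectations ("failed" becoming "failed: missing port in address") follow from the new `LastError` closure in api.go: when `getGlobalURL` fails, its error is wrapped with the scrape error text via `errors.Wrapf`. A self-contained sketch of that wrapping behavior (github.com/pkg/errors), with a stand-in cause:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	cause := errors.New("missing port in address") // stand-in for the SplitHostPort error
	fmt.Println(errors.Wrapf(cause, "failed"))     // failed: missing port in address
}
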
@@ -6,19 +6,21 @@ import EndpointLink from './EndpointLink';
 describe('EndpointLink', () => {
   it('renders a simple anchor if the endpoint has no query params', () => {
     const endpoint = 'http://100.104.208.71:15090/stats/prometheus';
-    const endpointLink = shallow(<EndpointLink endpoint={endpoint} />);
+    const globalURL = 'http://100.104.208.71:15090/stats/prometheus';
+    const endpointLink = shallow(<EndpointLink endpoint={endpoint} globalUrl={globalURL} />);
     const anchor = endpointLink.find('a');
-    expect(anchor.prop('href')).toEqual(endpoint);
+    expect(anchor.prop('href')).toEqual(globalURL);
     expect(anchor.children().text()).toEqual(endpoint);
     expect(endpointLink.find('br')).toHaveLength(0);
   });

   it('renders an anchor targeting endpoint but with query param labels if the endpoint has query params', () => {
     const endpoint = 'http://100.99.128.71:9115/probe?module=http_2xx&target=http://some-service';
-    const endpointLink = shallow(<EndpointLink endpoint={endpoint} />);
+    const globalURL = 'http://100.99.128.71:9115/probe?module=http_2xx&target=http://some-service';
+    const endpointLink = shallow(<EndpointLink endpoint={endpoint} globalUrl={globalURL} />);
     const anchor = endpointLink.find('a');
     const badges = endpointLink.find(Badge);
-    expect(anchor.prop('href')).toEqual(endpoint);
+    expect(anchor.prop('href')).toEqual(globalURL);
     expect(anchor.children().text()).toEqual('http://100.99.128.71:9115/probe');
     expect(endpointLink.find('br')).toHaveLength(1);
     expect(badges).toHaveLength(2);
@@ -29,7 +31,7 @@ describe('EndpointLink', () => {
   });

   it('renders an alert if url is invalid', () => {
-    const endpointLink = shallow(<EndpointLink endpoint={'afdsacas'} />);
+    const endpointLink = shallow(<EndpointLink endpoint={'afdsacas'} globalUrl={'afdsacas'} />);
     const err = endpointLink.find(Alert);
     expect(err.render().text()).toEqual('Error: Invalid URL');
   });
@@ -3,9 +3,10 @@ import { Badge, Alert } from 'reactstrap';

 export interface EndpointLinkProps {
   endpoint: string;
+  globalUrl: string;
 }

-const EndpointLink: FC<EndpointLinkProps> = ({ endpoint }) => {
+const EndpointLink: FC<EndpointLinkProps> = ({ endpoint, globalUrl }) => {
   let url: URL;
   try {
     url = new URL(endpoint);
@@ -22,7 +23,7 @@ const EndpointLink: FC<EndpointLinkProps> = ({ endpoint }) => {

   return (
     <>
-      <a href={endpoint}>{`${protocol}//${host}${pathname}`}</a>
+      <a href={globalUrl}>{`${protocol}//${host}${pathname}`}</a>
       {params.length > 0 ? <br /> : null}
       {params.map(([labelName, labelValue]: [string, string]) => {
         return (
@@ -49,6 +49,7 @@ const ScrapePoolPanel: FC<PanelProps> = ({ scrapePool, targetGroup }) => {
       labels,
       scrapePool,
       scrapeUrl,
+      globalUrl,
       lastError,
       lastScrape,
       lastScrapeDuration,
@@ -59,7 +60,7 @@ const ScrapePoolPanel: FC<PanelProps> = ({ scrapePool, targetGroup }) => {
     return (
       <tr key={scrapeUrl}>
         <td className={styles.endpoint}>
-          <EndpointLink endpoint={scrapeUrl} />
+          <EndpointLink endpoint={scrapeUrl} globalUrl={globalUrl} />
         </td>
         <td className={styles.state}>
           <Badge color={color}>{health.toUpperCase()}</Badge>
@@ -20,6 +20,7 @@ export const targetGroups: ScrapePools = Object.freeze({
       },
       scrapePool: 'blackbox',
       scrapeUrl: 'http://127.0.0.1:9115/probe?module=http_2xx&target=http%3A%2F%2Fprometheus.io',
+      globalUrl: 'http://localhost.localdomain:9115/probe?module=http_2xx&target=http%3A%2F%2Fprometheus.io',
       lastError: '',
       lastScrape: '2019-11-04T11:52:14.759299-07:00',
       lastScrapeDuration: 36560147,
@@ -39,6 +40,7 @@ export const targetGroups: ScrapePools = Object.freeze({
       },
       scrapePool: 'blackbox',
       scrapeUrl: 'http://127.0.0.1:9115/probe?module=http_2xx&target=https%3A%2F%2Fprometheus.io',
+      globalUrl: 'http://localhost.localdomain:9115/probe?module=http_2xx&target=https%3A%2F%2Fprometheus.io',
       lastError: '',
       lastScrape: '2019-11-04T11:52:24.731096-07:00',
       lastScrapeDuration: 49448763,
@@ -58,6 +60,7 @@ export const targetGroups: ScrapePools = Object.freeze({
       },
       scrapePool: 'blackbox',
       scrapeUrl: 'http://127.0.0.1:9115/probe?module=http_2xx&target=http%3A%2F%2Fexample.com%3A8080',
+      globalUrl: 'http://localhost.localdomain:9115/probe?module=http_2xx&target=http%3A%2F%2Fexample.com%3A8080',
       lastError: '',
       lastScrape: '2019-11-04T11:52:13.516654-07:00',
       lastScrapeDuration: 120916592,
@@ -81,6 +84,7 @@ export const targetGroups: ScrapePools = Object.freeze({
       },
       scrapePool: 'node_exporter',
       scrapeUrl: 'http://localhost:9100/metrics',
+      globalUrl: 'http://localhost.localdomain:9100/metrics',
       lastError: '',
       lastScrape: '2019-11-04T11:52:14.145703-07:00',
       lastScrapeDuration: 3842307,
@@ -104,6 +108,7 @@ export const targetGroups: ScrapePools = Object.freeze({
       },
       scrapePool: 'prometheus',
       scrapeUrl: 'http://localhost:9090/metrics',
+      globalUrl: 'http://localhost.localdomain:9000/metrics',
       lastError: '',
       lastScrape: '2019-11-04T11:52:18.479731-07:00',
       lastScrapeDuration: 4050976,
@@ -7,6 +7,7 @@ export interface Target {
   labels: Labels;
   scrapePool: string;
   scrapeUrl: string;
+  globalUrl: string;
   lastError: string;
   lastScrape: string;
   lastScrapeDuration: number;
80
web/web.go
@@ -38,6 +38,7 @@ import (
 	template_text "text/template"
 	"time"

+	"github.com/alecthomas/units"
 	"github.com/go-kit/kit/log"
 	"github.com/go-kit/kit/log/level"
 	conntrack "github.com/mwitkow/go-conntrack"
@@ -62,7 +63,6 @@ import (
 	"github.com/prometheus/prometheus/rules"
 	"github.com/prometheus/prometheus/scrape"
 	"github.com/prometheus/prometheus/storage"
-	prometheus_tsdb "github.com/prometheus/prometheus/storage/tsdb"
 	"github.com/prometheus/prometheus/template"
 	"github.com/prometheus/prometheus/util/httputil"
 	api_v1 "github.com/prometheus/prometheus/web/api/v1"
@@ -70,24 +70,20 @@ import (
 	"github.com/prometheus/prometheus/web/ui"
 )

-var (
-	localhostRepresentations = []string{"127.0.0.1", "localhost"}
-
-	// Paths that are handled by the React / Reach router that should all be served the main React app's index.html.
-	reactRouterPaths = []string{
-		"/",
-		"/alerts",
-		"/config",
-		"/flags",
-		"/graph",
-		"/rules",
-		"/service-discovery",
-		"/status",
-		"/targets",
-		"/tsdb-status",
-		"/version",
-	}
-)
+// Paths that are handled by the React / Reach router that should all be served the main React app's index.html.
+var reactRouterPaths = []string{
+	"/",
+	"/alerts",
+	"/config",
+	"/flags",
+	"/graph",
+	"/rules",
+	"/service-discovery",
+	"/status",
+	"/targets",
+	"/tsdb-status",
+	"/version",
+}

 // withStackTrace logs the stack trace in case the request panics. The function
 // will re-raise the error which will then be handled by the net/http package.
@@ -215,17 +211,18 @@ func (h *Handler) ApplyConfig(conf *config.Config) error {

 // Options for the web Handler.
 type Options struct {
 	Context context.Context
 	TSDB    func() *tsdb.DB
-	TSDBCfg       prometheus_tsdb.Options
-	Storage       storage.Storage
-	QueryEngine   *promql.Engine
-	LookbackDelta time.Duration
-	ScrapeManager *scrape.Manager
-	RuleManager   *rules.Manager
-	Notifier      *notifier.Manager
-	Version       *PrometheusVersion
-	Flags         map[string]string
+	TSDBRetentionDuration model.Duration
+	TSDBMaxBytes          units.Base2Bytes
+	Storage               storage.Storage
+	QueryEngine           *promql.Engine
+	LookbackDelta         time.Duration
+	ScrapeManager         *scrape.Manager
+	RuleManager           *rules.Manager
+	Notifier              *notifier.Manager
+	Version               *PrometheusVersion
+	Flags                 map[string]string

 	ListenAddress string
 	CORSOrigin    *regexp.Regexp
@@ -300,6 +297,11 @@ func New(logger log.Logger, o *Options) *Handler {
 			return *h.config
 		},
 		o.Flags,
+		api_v1.GlobalURLOptions{
+			ListenAddress: o.ListenAddress,
+			Host:          o.ExternalURL.Host,
+			Scheme:        o.ExternalURL.Scheme,
+		},
 		h.testReady,
 		func() api_v1.TSDBAdmin {
 			return h.options.TSDB()
@@ -756,14 +758,14 @@ func (h *Handler) status(w http.ResponseWriter, r *http.Request) {
 		GODEBUG: os.Getenv("GODEBUG"),
 	}

-	if h.options.TSDBCfg.RetentionDuration != 0 {
-		status.StorageRetention = h.options.TSDBCfg.RetentionDuration.String()
+	if h.options.TSDBRetentionDuration != 0 {
+		status.StorageRetention = h.options.TSDBRetentionDuration.String()
 	}
-	if h.options.TSDBCfg.MaxBytes != 0 {
+	if h.options.TSDBMaxBytes != 0 {
 		if status.StorageRetention != "" {
 			status.StorageRetention = status.StorageRetention + " or "
 		}
-		status.StorageRetention = status.StorageRetention + h.options.TSDBCfg.MaxBytes.String()
+		status.StorageRetention = status.StorageRetention + h.options.TSDBMaxBytes.String()
 	}

 	metrics, err := h.gatherer.Gather()
@@ -806,14 +808,14 @@ func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) {
 		GODEBUG: os.Getenv("GODEBUG"),
 	}

-	if h.options.TSDBCfg.RetentionDuration != 0 {
-		status.StorageRetention = h.options.TSDBCfg.RetentionDuration.String()
+	if h.options.TSDBRetentionDuration != 0 {
+		status.StorageRetention = h.options.TSDBRetentionDuration.String()
 	}
-	if h.options.TSDBCfg.MaxBytes != 0 {
+	if h.options.TSDBMaxBytes != 0 {
 		if status.StorageRetention != "" {
 			status.StorageRetention = status.StorageRetention + " or "
 		}
-		status.StorageRetention = status.StorageRetention + h.options.TSDBCfg.MaxBytes.String()
+		status.StorageRetention = status.StorageRetention + h.options.TSDBMaxBytes.String()
 	}

 	metrics, err := h.gatherer.Gather()
@@ -973,7 +975,7 @@ func tmplFuncs(consolesPath string, opts *Options) template_text.FuncMap {
 		if err != nil {
 			return u
 		}
-		for _, lhr := range localhostRepresentations {
+		for _, lhr := range api_v1.LocalhostRepresentations {
 			if host == lhr {
 				_, ownPort, err := net.SplitHostPort(opts.ListenAddress)
 				if err != nil {
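
Splitting `TSDBCfg` into `TSDBRetentionDuration` and `TSDBMaxBytes` leaves the status page assembling its retention string from the two fields directly, joining them with " or " when both are set. Restated as a standalone sketch (types from prometheus/common/model and alecthomas/units; the function name is made up):

// retentionString mirrors the status-page logic above; d and max stand in
// for the two new Options fields.
func retentionString(d model.Duration, max units.Base2Bytes) string {
	s := ""
	if d != 0 {
		s = d.String()
	}
	if max != 0 {
		if s != "" {
			s += " or "
		}
		s += max.String()
	}
	return s // e.g. "15d or 512MiB"
}
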
@@ -28,13 +28,11 @@ import (

 	"github.com/prometheus/client_golang/prometheus"
 	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"

 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/notifier"
 	"github.com/prometheus/prometheus/rules"
 	"github.com/prometheus/prometheus/scrape"
-	"github.com/prometheus/prometheus/storage/tsdb"
-	libtsdb "github.com/prometheus/prometheus/tsdb"
+	"github.com/prometheus/prometheus/tsdb"
 	"github.com/prometheus/prometheus/util/testutil"
 )

@@ -98,7 +96,7 @@ func TestReadyAndHealthy(t *testing.T) {
 	testutil.Ok(t, err)

 	defer os.RemoveAll(dbDir)
-	db, err := libtsdb.Open(dbDir, nil, nil, nil)
+	db, err := tsdb.Open(dbDir, nil, nil, nil)

 	testutil.Ok(t, err)

@@ -107,14 +105,14 @@ func TestReadyAndHealthy(t *testing.T) {
 		ReadTimeout:    30 * time.Second,
 		MaxConnections: 512,
 		Context:        nil,
-		Storage:        &tsdb.ReadyStorage{},
+		Storage:        nil,
 		QueryEngine:    nil,
 		ScrapeManager:  &scrape.Manager{},
 		RuleManager:    &rules.Manager{},
 		Notifier:       nil,
 		RoutePrefix:    "/",
 		EnableAdminAPI: true,
-		TSDB:           func() *libtsdb.DB { return db },
+		TSDB:           func() *tsdb.DB { return db },
 		ExternalURL: &url.URL{
 			Scheme: "http",
 			Host:   "localhost:9090",
@@ -289,7 +287,7 @@ func TestRoutePrefix(t *testing.T) {

 	defer os.RemoveAll(dbDir)

-	db, err := libtsdb.Open(dbDir, nil, nil, nil)
+	db, err := tsdb.Open(dbDir, nil, nil, nil)

 	testutil.Ok(t, err)

@@ -298,14 +296,18 @@ func TestRoutePrefix(t *testing.T) {
 		ReadTimeout:    30 * time.Second,
 		MaxConnections: 512,
 		Context:        nil,
-		Storage:        &tsdb.ReadyStorage{},
+		Storage:        nil,
 		QueryEngine:    nil,
 		ScrapeManager:  nil,
 		RuleManager:    nil,
 		Notifier:       nil,
 		RoutePrefix:    "/prometheus",
 		EnableAdminAPI: true,
-		TSDB:           func() *libtsdb.DB { return db },
+		ExternalURL: &url.URL{
+			Host:   "localhost.localdomain:9090",
+			Scheme: "http",
+		},
+		TSDB: func() *tsdb.DB { return db },
 	}

 	opts.Flags = map[string]string{}
@@ -391,7 +393,12 @@ func TestDebugHandler(t *testing.T) {
 		{"/foo", "/bar/debug/pprof/goroutine", 404},
 	} {
 		opts := &Options{
 			RoutePrefix: tc.prefix,
+			ListenAddress: "somehost:9090",
+			ExternalURL: &url.URL{
+				Host:   "localhost.localdomain:9090",
+				Scheme: "http",
+			},
 		}
 		handler := New(nil, opts)
 		handler.Ready()
@@ -411,7 +418,14 @@ func TestDebugHandler(t *testing.T) {
 func TestHTTPMetrics(t *testing.T) {
 	t.Parallel()

-	handler := New(nil, &Options{RoutePrefix: "/"})
+	handler := New(nil, &Options{
+		RoutePrefix:   "/",
+		ListenAddress: "somehost:9090",
+		ExternalURL: &url.URL{
+			Host:   "localhost.localdomain:9090",
+			Scheme: "http",
+		},
+	})
 	getReady := func() int {
 		t.Helper()
 		w := httptest.NewRecorder()