Merge remote-tracking branch 'prometheus/main' into chore/sync-prometheus

Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
Author: Arve Knudsen, 2023-09-18 09:02:47 +02:00
Commit: e48d4e5835
101 changed files with 2158 additions and 1772 deletions


@@ -27,7 +27,10 @@ updates:
     directory: "/"
     schedule:
       interval: "monthly"
-    open-pull-requests-limit: 0
+  - package-ecosystem: "github-actions"
+    directory: "/scripts"
+    schedule:
+      interval: "monthly"
   - package-ecosystem: "docker"
     directory: "/"
     schedule:


@@ -16,7 +16,7 @@ jobs:
     runs-on: ubuntu-latest
     if: github.repository_owner == 'prometheus'
     steps:
-      - uses: dessant/lock-threads@v4
+      - uses: dessant/lock-threads@be8aa5be94131386884a6da4189effda9b14aa21 # v4.0.1
        with:
          process-only: 'issues'
          issue-inactive-days: '180'


@@ -1,5 +1,27 @@
 # Changelog
 
+## 2.47.0 / 2023-09-06
+
+This release adds an experimental OpenTelemetry (OTLP) Ingestion feature,
+as well as a new setting, `keep_dropped_targets`, to limit the number of
+dropped targets held in memory. This defaults to 0, meaning 'no limit', so we
+encourage users with large Prometheus deployments to try setting a limit such as 100.
+
+* [FEATURE] Web: Add OpenTelemetry (OTLP) Ingestion endpoint. #12571 #12643
+* [FEATURE] Scraping: Optionally limit detail on dropped targets, to save memory. #12647
+* [ENHANCEMENT] TSDB: Write head chunks to disk in the background to reduce blocking. #11818
+* [ENHANCEMENT] PromQL: Speed up aggregate and function queries. #12682
+* [ENHANCEMENT] PromQL: More efficient evaluation of queries with `timestamp()`. #12579
+* [ENHANCEMENT] API: Faster streaming of Labels to JSON. #12598
+* [ENHANCEMENT] Agent: Memory pooling optimisation. #12651
+* [ENHANCEMENT] TSDB: Prevent storage space leaks due to terminated snapshots on shutdown. #12664
+* [ENHANCEMENT] Histograms: Refactoring and optimisations. #12352 #12584 #12596 #12711 #12054
+* [ENHANCEMENT] Histograms: Add `histogram_stdvar` and `histogram_stddev` functions. #12614
+* [ENHANCEMENT] Remote-write: Add `http.resend_count` tracing attribute. #12676
+* [ENHANCEMENT] TSDB: Support native histograms in snapshot on shutdown. #12722
+* [BUGFIX] TSDB/Agent: Ensure that new series get written to the WAL on rollback. #12592
+* [BUGFIX] Scraping: Fix infinite loop on exemplar in protobuf format. #12737
+
 ## 2.46.0 / 2023-07-25
 
 * [FEATURE] Promtool: Add PromQL format and label matcher set/delete commands to promtool. #11411
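
For the `keep_dropped_targets` setting called out in the release notes above, a hedged configuration sketch; that it lives at the `global` level of `prometheus.yml` is an inference from the notes, not something this diff shows:

```yaml
global:
  scrape_interval: 15s
  # Keep details for at most 100 relabeling-dropped targets in memory;
  # the default of 0 means 'no limit'.
  keep_dropped_targets: 100
```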


@@ -1 +1 @@
-2.46.0
+2.47.0


@@ -1378,17 +1378,17 @@ func (s *readyStorage) StartTime() (int64, error) {
 }
 
 // Querier implements the Storage interface.
-func (s *readyStorage) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
+func (s *readyStorage) Querier(mint, maxt int64) (storage.Querier, error) {
 	if x := s.get(); x != nil {
-		return x.Querier(ctx, mint, maxt)
+		return x.Querier(mint, maxt)
 	}
 	return nil, tsdb.ErrNotReady
 }
 
 // ChunkQuerier implements the Storage interface.
-func (s *readyStorage) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) {
+func (s *readyStorage) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) {
 	if x := s.get(); x != nil {
-		return x.ChunkQuerier(ctx, mint, maxt)
+		return x.ChunkQuerier(mint, maxt)
 	}
 	return nil, tsdb.ErrNotReady
 }
@@ -1461,11 +1461,11 @@ func (s *readyStorage) CleanTombstones() error {
 }
 
 // Delete implements the api_v1.TSDBAdminStats and api_v2.TSDBAdmin interfaces.
-func (s *readyStorage) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
+func (s *readyStorage) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Matcher) error {
 	if x := s.get(); x != nil {
 		switch db := x.(type) {
 		case *tsdb.DB:
-			return db.Delete(mint, maxt, ms...)
+			return db.Delete(ctx, mint, maxt, ms...)
 		case *agent.DB:
 			return agent.ErrUnsupported
 		default:
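
These two hunks capture the storage API migration that most of this sync revolves around: the context moves off `Querier`/`ChunkQuerier` construction and onto the operations that actually touch data (`Select`, and now `Delete`). A minimal caller-side sketch of the new shape; `db` and `matchers` are illustrative placeholders, not names from the patch:

```go
ctx := context.Background()

// New signature: opening a querier no longer takes a context...
q, err := db.Querier(math.MinInt64, math.MaxInt64)
if err != nil {
	return err
}
defer q.Close()

// ...instead the context is passed where the work happens.
ss := q.Select(ctx, false, nil, matchers...)
for ss.Next() {
	series := ss.At()
	_ = series // process the series
}
return ss.Err()
```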


@@ -45,7 +45,7 @@ func sortSamples(samples []backfillSample) {
 }
 
 func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample { // nolint:revive
-	ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
+	ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
 	samples := []backfillSample{}
 	for ss.Next() {
 		series := ss.At()
@@ -67,7 +67,7 @@ func testBlocks(t *testing.T, db *tsdb.DB, expectedMinTime, expectedMaxTime, exp
 		require.Equal(t, block.MinTime()/expectedBlockDuration, (block.MaxTime()-1)/expectedBlockDuration, "block %d contains data outside of one aligned block duration", i)
 	}
 
-	q, err := db.Querier(context.Background(), math.MinInt64, math.MaxInt64)
+	q, err := db.Querier(math.MinInt64, math.MaxInt64)
 	require.NoError(t, err)
 	defer func() {
 		require.NoError(t, q.Close())


@@ -86,6 +86,8 @@ func main() {
 		httpConfigFilePath string
 	)
 
+	ctx := context.Background()
+
 	app := kingpin.New(filepath.Base(os.Args[0]), "Tooling for the Prometheus monitoring system.").UsageWriter(os.Stdout)
 	app.Version(version.Print("promtool"))
 	app.HelpFlag.Short('h')
@@ -370,13 +372,13 @@ func main() {
 		os.Exit(checkErr(benchmarkWrite(*benchWriteOutPath, *benchSamplesFile, *benchWriteNumMetrics, *benchWriteNumScrapes)))
 
 	case tsdbAnalyzeCmd.FullCommand():
-		os.Exit(checkErr(analyzeBlock(*analyzePath, *analyzeBlockID, *analyzeLimit, *analyzeRunExtended)))
+		os.Exit(checkErr(analyzeBlock(ctx, *analyzePath, *analyzeBlockID, *analyzeLimit, *analyzeRunExtended)))
 
 	case tsdbListCmd.FullCommand():
 		os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable)))
 
 	case tsdbDumpCmd.FullCommand():
-		os.Exit(checkErr(dumpSamples(*dumpPath, *dumpMinTime, *dumpMaxTime, *dumpMatch)))
+		os.Exit(checkErr(dumpSamples(ctx, *dumpPath, *dumpMinTime, *dumpMaxTime, *dumpMatch)))
 	// TODO(aSquare14): Work on adding support for custom block size.
 	case openMetricsImportCmd.FullCommand():
 		os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration))


@@ -124,10 +124,10 @@ func TestBackfillRuleIntegration(t *testing.T) {
 			blocks := db.Blocks()
 			require.Equal(t, (i+1)*tt.expectedBlockCount, len(blocks))
 
-			q, err := db.Querier(context.Background(), math.MinInt64, math.MaxInt64)
+			q, err := db.Querier(math.MinInt64, math.MaxInt64)
 			require.NoError(t, err)
 
-			selectedSeries := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
+			selectedSeries := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
 			var seriesCount, samplesCount int
 			for selectedSeries.Next() {
 				seriesCount++
@@ -248,11 +248,11 @@ func TestBackfillLabels(t *testing.T) {
 	db, err := tsdb.Open(tmpDir, nil, nil, opts, nil)
 	require.NoError(t, err)
 
-	q, err := db.Querier(context.Background(), math.MinInt64, math.MaxInt64)
+	q, err := db.Querier(math.MinInt64, math.MaxInt64)
 	require.NoError(t, err)
 
 	t.Run("correct-labels", func(t *testing.T) {
-		selectedSeries := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
+		selectedSeries := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
 		for selectedSeries.Next() {
 			series := selectedSeries.At()
 			expectedLabels := labels.FromStrings("__name__", "rulename", "name1", "value-from-rule")


@@ -413,7 +413,7 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error)
 	return db, b, nil
 }
 
-func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
+func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExtended bool) error {
 	db, block, err := openBlock(path, blockID)
 	if err != nil {
 		return err
@@ -433,7 +433,7 @@ func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
 	}
 	defer ir.Close()
 
-	allLabelNames, err := ir.LabelNames()
+	allLabelNames, err := ir.LabelNames(ctx)
 	if err != nil {
 		return err
 	}
@@ -460,7 +460,7 @@ func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
 	labelpairsUncovered := map[string]uint64{}
 	labelpairsCount := map[string]uint64{}
 	entries := 0
-	p, err := ir.Postings("", "") // The special all key.
+	p, err := ir.Postings(ctx, "", "") // The special all key.
 	if err != nil {
 		return err
 	}
@@ -512,7 +512,7 @@ func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
 
 	postingInfos = postingInfos[:0]
 	for _, n := range allLabelNames {
-		values, err := ir.SortedLabelValues(n)
+		values, err := ir.SortedLabelValues(ctx, n)
 		if err != nil {
 			return err
 		}
@@ -528,7 +528,7 @@ func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
 
 	postingInfos = postingInfos[:0]
 	for _, n := range allLabelNames {
-		lv, err := ir.SortedLabelValues(n)
+		lv, err := ir.SortedLabelValues(ctx, n)
 		if err != nil {
 			return err
 		}
@@ -538,12 +538,12 @@ func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
 	printInfo(postingInfos)
 
 	postingInfos = postingInfos[:0]
-	lv, err := ir.SortedLabelValues("__name__")
+	lv, err := ir.SortedLabelValues(ctx, "__name__")
 	if err != nil {
 		return err
 	}
 	for _, n := range lv {
-		postings, err := ir.Postings("__name__", n)
+		postings, err := ir.Postings(ctx, "__name__", n)
 		if err != nil {
 			return err
 		}
@@ -560,14 +560,15 @@ func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
 	printInfo(postingInfos)
 
 	if runExtended {
-		return analyzeCompaction(block, ir)
+		return analyzeCompaction(ctx, block, ir)
 	}
 
 	return nil
 }
 
-func analyzeCompaction(block tsdb.BlockReader, indexr tsdb.IndexReader) (err error) {
-	postingsr, err := indexr.Postings(index.AllPostingsKey())
+func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.IndexReader) (err error) {
+	n, v := index.AllPostingsKey()
+	postingsr, err := indexr.Postings(ctx, n, v)
 	if err != nil {
 		return err
 	}
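
One detail worth calling out in the `analyzeCompaction` hunk: `index.AllPostingsKey()` returns two values, and Go only permits forwarding a multi-valued call directly when it supplies a function's entire argument list. Adding `ctx` breaks that, so the result has to be unpacked first:

```go
// Before: the two results filled Postings' whole parameter list.
//   postingsr, err := indexr.Postings(index.AllPostingsKey())

// After: with a context parameter added, unpack explicitly.
n, v := index.AllPostingsKey()
postingsr, err := indexr.Postings(ctx, n, v)
```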
@@ -619,7 +620,7 @@ func analyzeCompaction(block tsdb.BlockReader, indexr tsdb.IndexReader) (err err
 	return nil
 }
 
-func dumpSamples(path string, mint, maxt int64, match string) (err error) {
+func dumpSamples(ctx context.Context, path string, mint, maxt int64, match string) (err error) {
 	db, err := tsdb.OpenDBReadOnly(path, nil)
 	if err != nil {
 		return err
@@ -627,7 +628,7 @@ func dumpSamples(path string, mint, maxt int64, match string) (err error) {
 	defer func() {
 		err = tsdb_errors.NewMulti(err, db.Close()).Err()
 	}()
-	q, err := db.Querier(context.TODO(), mint, maxt)
+	q, err := db.Querier(mint, maxt)
 	if err != nil {
 		return err
 	}
@@ -637,7 +638,7 @@ func dumpSamples(path string, mint, maxt int64, match string) (err error) {
 	if err != nil {
 		return err
 	}
-	ss := q.Select(false, nil, matchers...)
+	ss := q.Select(ctx, false, nil, matchers...)
 
 	for ss.Next() {
 		series := ss.At()
@@ -661,7 +662,7 @@ func dumpSamples(path string, mint, maxt int64, match string) (err error) {
 	}
 
 	if ws := ss.Warnings(); len(ws) > 0 {
-		return tsdb_errors.NewMulti(ws...).Err()
+		return tsdb_errors.NewMulti(ws.AsErrors()...).Err()
 	}
 
 	if ss.Err() != nil {
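
The `ws.AsErrors()` change is a consequence of the new `util/annotations` package (imported in the engine.go hunk near the end of this diff): `Warnings()` now returns `annotations.Annotations` rather than the old `storage.Warnings` slice of plain errors, so callers that still aggregate warnings as errors convert explicitly. A hedged sketch of the caller-side difference:

```go
ws := ss.Warnings() // now annotations.Annotations, previously []error
if len(ws) > 0 {
	// AsErrors flattens the annotations back into a plain []error for
	// code paths that still wrap warnings as a multi-error.
	return tsdb_errors.NewMulti(ws.AsErrors()...).Err()
}
```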


@@ -1745,6 +1745,14 @@ var expectedErrors = []struct {
 		filename: "ec2_filters_empty_values.bad.yml",
 		errMsg:   `EC2 SD configuration filter values cannot be empty`,
 	},
+	{
+		filename: "ec2_token_file.bad.yml",
+		errMsg:   `at most one of bearer_token & bearer_token_file must be configured`,
+	},
+	{
+		filename: "lightsail_token_file.bad.yml",
+		errMsg:   `at most one of bearer_token & bearer_token_file must be configured`,
+	},
 	{
 		filename: "section_key_dup.bad.yml",
 		errMsg:   "field scrape_configs already set in type config.plain",
@@ -1769,6 +1777,10 @@ var expectedErrors = []struct {
 		filename: "azure_authentication_method.bad.yml",
 		errMsg:   "unknown authentication_type \"invalid\". Supported types are \"OAuth\" or \"ManagedIdentity\"",
 	},
+	{
+		filename: "azure_bearertoken_basicauth.bad.yml",
+		errMsg:   "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured",
+	},
 	{
 		filename: "empty_scrape_config.bad.yml",
 		errMsg:   "empty or null scrape config section",
@@ -1821,6 +1833,10 @@ var expectedErrors = []struct {
 		filename: "puppetdb_no_scheme.bad.yml",
 		errMsg:   "URL scheme must be 'http' or 'https'",
 	},
+	{
+		filename: "puppetdb_token_file.bad.yml",
+		errMsg:   "at most one of bearer_token & bearer_token_file must be configured",
+	},
 	{
 		filename: "hetzner_role.bad.yml",
 		errMsg:   "unknown role",
@@ -1857,6 +1873,10 @@ var expectedErrors = []struct {
 		filename: "http_url_no_host.bad.yml",
 		errMsg:   "host is missing in URL",
 	},
+	{
+		filename: "http_token_file.bad.yml",
+		errMsg:   "at most one of bearer_token & bearer_token_file must be configured",
+	},
 	{
 		filename: "http_url_bad_scheme.bad.yml",
 		errMsg:   "URL scheme must be 'http' or 'https'",
@@ -1885,6 +1905,10 @@ var expectedErrors = []struct {
 		filename: "uyuni_no_server.bad.yml",
 		errMsg:   "Uyuni SD configuration requires server host",
 	},
+	{
+		filename: "uyuni_token_file.bad.yml",
+		errMsg:   "at most one of bearer_token & bearer_token_file must be configured",
+	},
 	{
 		filename: "ionos_datacenter.bad.yml",
 		errMsg:   "datacenter id can't be empty",


@@ -0,0 +1,11 @@
+scrape_configs:
+  - job_name: prometheus
+    azure_sd_configs:
+      - subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11
+        tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2
+        client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C
+        client_secret: mysecret
+        bearer_token: 1234
+        basic_auth:
+          username: user
+          password: password


@@ -0,0 +1,6 @@
+scrape_configs:
+  - job_name: foo
+    ec2_sd_configs:
+      - region: us-east-1
+        bearer_token: foo
+        bearer_token_file: foo


@@ -0,0 +1,6 @@
+scrape_configs:
+  - job_name: foo
+    http_sd_configs:
+      - url: http://foo
+        bearer_token: foo
+        bearer_token_file: foo


@@ -0,0 +1,6 @@
+scrape_configs:
+  - job_name: foo
+    lightsail_sd_configs:
+      - region: us-east-1
+        bearer_token: foo
+        bearer_token_file: foo


@@ -0,0 +1,7 @@
+scrape_configs:
+  - job_name: puppetdb
+    puppetdb_sd_configs:
+      - url: http://puppet
+        query: 'resources { type = "Package" and title = "httpd" }'
+        bearer_token: foo
+        bearer_token_file: foo


@@ -0,0 +1,8 @@
+scrape_configs:
+  - job_name: uyuni
+    uyuni_sd_configs:
+      - server: "server"
+        username: "username"
+        password: "password"
+        bearer_token: foo
+        bearer_token_file: foo


@@ -129,7 +129,7 @@ func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 			return errors.New("EC2 SD configuration filter values cannot be empty")
 		}
 	}
-	return nil
+	return c.HTTPClientConfig.Validate()
 }
 
 // EC2Discovery periodically performs EC2-SD requests. It implements


@@ -109,7 +109,7 @@ func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
 		}
 		c.Region = region
 	}
-	return nil
+	return c.HTTPClientConfig.Validate()
 }
 
 // LightsailDiscovery periodically performs Lightsail-SD requests. It implements


@@ -144,7 +144,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 		return fmt.Errorf("unknown authentication_type %q. Supported types are %q or %q", c.AuthenticationMethod, authMethodOAuth, authMethodManagedIdentity)
 	}
 
-	return nil
+	return c.HTTPClientConfig.Validate()
 }
 
 type Discovery struct {


@@ -99,7 +99,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if parsedURL.Host == "" {
 		return fmt.Errorf("host is missing in URL")
 	}
-	return nil
+	return c.HTTPClientConfig.Validate()
 }
 
 const httpSDURLLabel = model.MetaLabelPrefix + "url"


@@ -115,7 +115,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if c.Query == "" {
 		return fmt.Errorf("query missing")
 	}
-	return nil
+	return c.HTTPClientConfig.Validate()
 }
 
 // Discovery provides service discovery functionality based


@@ -146,7 +146,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if c.Password == "" {
 		return errors.New("Uyuni SD configuration requires a password")
 	}
-	return nil
+	return c.HTTPClientConfig.Validate()
 }
 
 func login(rpcclient *xmlrpc.Client, user, pass string, duration int) (string, error) {
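
All six SD configs now close their `UnmarshalYAML` with the same delegation, which is what produces the `at most one of bearer_token & bearer_token_file` errors asserted in the new config tests and exercised by the `.bad.yml` fixtures above. A minimal sketch of the check being delegated to, assuming `HTTPClientConfig` here is the one from `github.com/prometheus/common/config` (field names per that package; the standalone program is purely illustrative):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/config"
)

func main() {
	cfg := config.HTTPClientConfig{
		BearerToken:     config.Secret("foo"),
		BearerTokenFile: "foo",
	}
	// Validate enforces mutual exclusion between the auth options.
	if err := cfg.Validate(); err != nil {
		fmt.Println(err) // at most one of bearer_token & bearer_token_file must be configured
	}
}
```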

go.mod

@@ -69,7 +69,7 @@ require (
 	go.uber.org/automaxprocs v1.5.2
 	go.uber.org/goleak v1.2.1
 	go.uber.org/multierr v1.11.0
-	golang.org/x/net v0.12.0
+	golang.org/x/net v0.13.0
 	golang.org/x/oauth2 v0.10.0
 	golang.org/x/sync v0.3.0
 	golang.org/x/sys v0.10.0
@@ -81,9 +81,9 @@ require (
 	google.golang.org/protobuf v1.31.0
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
-	k8s.io/api v0.27.3
-	k8s.io/apimachinery v0.27.3
-	k8s.io/client-go v0.27.3
+	k8s.io/api v0.28.1
+	k8s.io/apimachinery v0.28.1
+	k8s.io/client-go v0.28.1
 	k8s.io/klog v1.0.0
 	k8s.io/klog/v2 v2.100.1
 )
@@ -94,6 +94,7 @@ require (
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
 	github.com/dustin/go-humanize v1.0.0 // indirect
+	github.com/google/gnostic-models v0.6.8 // indirect
 	github.com/google/s2a-go v0.1.4 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
@@ -124,7 +125,7 @@ require (
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/emicklei/go-restful/v3 v3.10.2 // indirect
-	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
+	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
 	github.com/fatih/color v1.15.0 // indirect
 	github.com/felixge/httpsnoop v1.0.3 // indirect
 	github.com/ghodss/yaml v1.0.0 // indirect
@@ -144,7 +145,6 @@ require (
 	github.com/golang/glog v1.1.0 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
-	github.com/google/gnostic v0.6.9 // indirect
 	github.com/google/go-cmp v0.5.9 // indirect
 	github.com/google/go-querystring v1.1.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
@@ -196,7 +196,7 @@ require (
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gotest.tools/v3 v3.0.3 // indirect
-	k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515 // indirect
+	k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
 	k8s.io/utils v0.0.0-20230711102312-30195339c3c7 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect

go.sum

@@ -74,7 +74,6 @@ github.com/DmitriyVTitov/size v1.5.0/go.mod h1:le6rNI4CoLQV1b9gzp1+3d7hMAD/uu2Qc
 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
 github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
 github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
@@ -114,13 +113,11 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
 github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
 github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
 github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
 github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -134,7 +131,6 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
@@ -173,7 +169,6 @@ github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKoh
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -190,15 +185,14 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
 github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
 github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM=
 github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
 github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
-github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
-github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
+github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
@@ -206,7 +200,6 @@ github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
 github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
 github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
 github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
 github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
 github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -276,7 +269,7 @@ github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPr
 github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
 github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
 github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
 github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
 github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
@@ -354,8 +347,8 @@ github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
-github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0=
-github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E=
+github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
+github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -525,7 +518,6 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxv
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -622,9 +614,9 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
 github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk=
+github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E=
+github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
 github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
@@ -735,13 +727,11 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
 github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
 github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
 github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
@@ -777,9 +767,6 @@ github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3k
 github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
 github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
 github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
 github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=
 github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
@@ -936,18 +923,16 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
 golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
 golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
-golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
+golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY=
+golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1023,7 +1008,6 @@ golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1181,7 +1165,6 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
 google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
 google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 h1:+VoAg+OKmWaommL56xmZSE2sUK8A7m6SUO7X89F2tbw=
 google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753/go.mod h1:iqkVr8IRpZ53gx1dEnWlCUIEwDWqWARWrbzpasaTNYM=
 google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 h1:lCbbUxUDD+DiXx9Q6F/ttL0aAu7N2pz8XnmMm8ZW4NE=
@@ -1207,7 +1190,6 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
 google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
 google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
 google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI=
 google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
@@ -1223,7 +1205,6 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
 google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
@@ -1271,14 +1252,14 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.27.3 h1:yR6oQXXnUEBWEWcvPWS0jQL575KoAboQPfJAuKNrw5Y=
-k8s.io/api v0.27.3/go.mod h1:C4BNvZnQOF7JA/0Xed2S+aUyJSfTGkGFxLXz9MnpIpg=
-k8s.io/apimachinery v0.27.3 h1:Ubye8oBufD04l9QnNtW05idcOe9Z3GQN8+7PqmuVcUM=
-k8s.io/apimachinery v0.27.3/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E=
-k8s.io/client-go v0.27.3 h1:7dnEGHZEJld3lYwxvLl7WoehK6lAq7GvgjxpA3nv1E8=
-k8s.io/client-go v0.27.3/go.mod h1:2MBEKuTo6V1lbKy3z1euEGnhPfGZLKTS9tiJ2xodM48=
-k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515 h1:OmK1d0WrkD3IPfkskvroRykOulHVHf0s0ZIFRjyt+UI=
-k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515/go.mod h1:kzo02I3kQ4BTtEfVLaPbjvCkX97YqGve33wzlb3fofQ=
+k8s.io/api v0.28.1 h1:i+0O8k2NPBCPYaMB+uCkseEbawEt/eFaiRqUx8aB108=
+k8s.io/api v0.28.1/go.mod h1:uBYwID+66wiL28Kn2tBjBYQdEU0Xk0z5qF8bIBqk/Dg=
+k8s.io/apimachinery v0.28.1 h1:EJD40og3GizBSV3mkIoXQBsws32okPOy+MkRyzh6nPY=
+k8s.io/apimachinery v0.28.1/go.mod h1:X0xh/chESs2hP9koe+SdIAcXWcQ+RM5hy0ZynB+yEvw=
+k8s.io/client-go v0.28.1 h1:pRhMzB8HyLfVwpngWKE8hDcXRqifh1ga2Z/PU9SXVK8=
+k8s.io/client-go v0.28.1/go.mod h1:pEZA3FqOsVkCc07pFVzK076R+P/eXqsgx5zuuRWukNE=
+k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
+k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
 k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc=
 k8s.io/utils v0.0.0-20230711102312-30195339c3c7/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=

View file

@ -28,6 +28,8 @@ import (
) )
func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *Engine, interval, numIntervals int) error { func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *Engine, interval, numIntervals int) error {
ctx := context.Background()
metrics := []labels.Labels{} metrics := []labels.Labels{}
metrics = append(metrics, labels.FromStrings("__name__", "a_one")) metrics = append(metrics, labels.FromStrings("__name__", "a_one"))
metrics = append(metrics, labels.FromStrings("__name__", "b_one")) metrics = append(metrics, labels.FromStrings("__name__", "b_one"))
@ -67,7 +69,7 @@ func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *Engine, interval,
} }
} }
stor.DB.ForceHeadMMap() // Ensure we have at most one head chunk for every series. stor.DB.ForceHeadMMap() // Ensure we have at most one head chunk for every series.
stor.DB.Compact() stor.DB.Compact(ctx)
return nil return nil
} }
@ -154,7 +156,8 @@ func rangeQueryCases() []benchCase {
expr: "sum by (le)(h_X)", expr: "sum by (le)(h_X)",
}, },
{ {
expr: "count_values('value', h_X)", expr: "count_values('value', h_X)",
steps: 100,
}, },
{ {
expr: "topk(1, a_X)", expr: "topk(1, a_X)",
@ -214,7 +217,6 @@ func rangeQueryCases() []benchCase {
tmp = append(tmp, c) tmp = append(tmp, c)
} else { } else {
tmp = append(tmp, benchCase{expr: c.expr, steps: 1}) tmp = append(tmp, benchCase{expr: c.expr, steps: 1})
tmp = append(tmp, benchCase{expr: c.expr, steps: 10})
tmp = append(tmp, benchCase{expr: c.expr, steps: 100}) tmp = append(tmp, benchCase{expr: c.expr, steps: 100})
tmp = append(tmp, benchCase{expr: c.expr, steps: 1000}) tmp = append(tmp, benchCase{expr: c.expr, steps: 1000})
} }
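
Note: the Compact call above now takes a context, making benchmark-setup compaction cancellable. A minimal sketch of the new call shape (the error handling shown here is illustrative; the benchmark itself discards Compact's result):

	ctx := context.Background()
	stor.DB.ForceHeadMMap() // at most one head chunk per series, as above
	if err := stor.DB.Compact(ctx); err != nil {
		return err
	}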

View file

@ -44,6 +44,7 @@ import (
"github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/util/annotations"
"github.com/prometheus/prometheus/util/stats" "github.com/prometheus/prometheus/util/stats"
"github.com/prometheus/prometheus/util/zeropool" "github.com/prometheus/prometheus/util/zeropool"
) )
@ -574,7 +575,7 @@ func (ng *Engine) newTestQuery(f func(context.Context) error) Query {
// //
// At this point per query only one EvalStmt is evaluated. Alert and record // At this point per query only one EvalStmt is evaluated. Alert and record
// statements are not handled by the Engine. // statements are not handled by the Engine.
func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storage.Warnings, err error) { func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annotations.Annotations, err error) {
ng.metrics.currentQueries.Inc() ng.metrics.currentQueries.Inc()
defer func() { defer func() {
ng.metrics.currentQueries.Dec() ng.metrics.currentQueries.Dec()
@ -667,17 +668,17 @@ func durationMilliseconds(d time.Duration) int64 {
} }
// execEvalStmt evaluates the expression of an evaluation statement for the given time range. // execEvalStmt evaluates the expression of an evaluation statement for the given time range.
func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.EvalStmt) (parser.Value, storage.Warnings, error) { func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.EvalStmt) (parser.Value, annotations.Annotations, error) {
prepareSpanTimer, ctxPrepare := query.stats.GetSpanTimer(ctx, stats.QueryPreparationTime, ng.metrics.queryPrepareTime) prepareSpanTimer, ctxPrepare := query.stats.GetSpanTimer(ctx, stats.QueryPreparationTime, ng.metrics.queryPrepareTime)
mint, maxt := ng.findMinMaxTime(s) mint, maxt := ng.findMinMaxTime(s)
querier, err := query.queryable.Querier(ctxPrepare, mint, maxt) querier, err := query.queryable.Querier(mint, maxt)
if err != nil { if err != nil {
prepareSpanTimer.Finish() prepareSpanTimer.Finish()
return nil, nil, err return nil, nil, err
} }
defer querier.Close() defer querier.Close()
ng.populateSeries(querier, s) ng.populateSeries(ctxPrepare, querier, s)
prepareSpanTimer.Finish() prepareSpanTimer.Finish()
// Modify the offset of vector and matrix selectors for the @ modifier // Modify the offset of vector and matrix selectors for the @ modifier
@ -891,7 +892,7 @@ func (ng *Engine) getLastSubqueryInterval(path []parser.Node) time.Duration {
return interval return interval
} }
func (ng *Engine) populateSeries(querier storage.Querier, s *parser.EvalStmt) { func (ng *Engine) populateSeries(ctx context.Context, querier storage.Querier, s *parser.EvalStmt) {
// Whenever a MatrixSelector is evaluated, evalRange is set to the corresponding range. // Whenever a MatrixSelector is evaluated, evalRange is set to the corresponding range.
// The evaluation of the VectorSelector inside then evaluates the given range and unsets // The evaluation of the VectorSelector inside then evaluates the given range and unsets
// the variable. // the variable.
@ -914,7 +915,7 @@ func (ng *Engine) populateSeries(querier storage.Querier, s *parser.EvalStmt) {
} }
evalRange = 0 evalRange = 0
hints.By, hints.Grouping = extractGroupsFromPath(path) hints.By, hints.Grouping = extractGroupsFromPath(path)
n.UnexpandedSeriesSet = querier.Select(false, hints, n.LabelMatchers...) n.UnexpandedSeriesSet = querier.Select(ctx, false, hints, n.LabelMatchers...)
case *parser.MatrixSelector: case *parser.MatrixSelector:
evalRange = n.Range evalRange = n.Range
@ -953,7 +954,7 @@ func extractGroupsFromPath(p []parser.Node) (bool, []string) {
return false, nil return false, nil
} }
func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (storage.Warnings, error) { func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations.Annotations, error) {
switch e := expr.(type) { switch e := expr.(type) {
case *parser.MatrixSelector: case *parser.MatrixSelector:
return checkAndExpandSeriesSet(ctx, e.VectorSelector) return checkAndExpandSeriesSet(ctx, e.VectorSelector)
@ -968,7 +969,7 @@ func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (storage.War
return nil, nil return nil, nil
} }
func expandSeriesSet(ctx context.Context, it storage.SeriesSet) (res []storage.Series, ws storage.Warnings, err error) { func expandSeriesSet(ctx context.Context, it storage.SeriesSet) (res []storage.Series, ws annotations.Annotations, err error) {
for it.Next() { for it.Next() {
select { select {
case <-ctx.Done(): case <-ctx.Done():
@ -982,7 +983,7 @@ func expandSeriesSet(ctx context.Context, it storage.SeriesSet) (res []storage.S
type errWithWarnings struct { type errWithWarnings struct {
err error err error
warnings storage.Warnings warnings annotations.Annotations
} }
func (e errWithWarnings) Error() string { return e.err.Error() } func (e errWithWarnings) Error() string { return e.err.Error() }
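
Note: the recurring substitution in this file is storage.Warnings (a plain []error) giving way to annotations.Annotations, which is combined with Merge and Add instead of append. A minimal sketch of the pattern, using only calls visible in this diff; that Annotations is keyed per annotation (so duplicates collapse) is an assumption about the upstream package, not shown here:

	package main

	import (
		"errors"
		"fmt"

		"github.com/prometheus/prometheus/util/annotations"
	)

	func main() {
		var ws annotations.Annotations // the zero value is usable
		ws.Merge(annotations.New().Add(errors.New("custom warning")))
		ws.Merge(annotations.New().Add(errors.New("custom warning"))) // merged, not appended twice
		fmt.Println(len(ws)) // 1, under the deduplication assumption above
	}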
@ -1017,7 +1018,7 @@ func (ev *evaluator) error(err error) {
} }
// recover is the handler that turns panics into returns from the top level of evaluation. // recover is the handler that turns panics into returns from the top level of evaluation.
func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error) { func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp *error) {
e := recover() e := recover()
if e == nil { if e == nil {
return return
@ -1033,7 +1034,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error
*errp = fmt.Errorf("unexpected error: %w", err) *errp = fmt.Errorf("unexpected error: %w", err)
case errWithWarnings: case errWithWarnings:
*errp = err.err *errp = err.err
*ws = append(*ws, err.warnings...) ws.Merge(err.warnings)
case error: case error:
*errp = err *errp = err
default: default:
@ -1041,7 +1042,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *storage.Warnings, errp *error
} }
} }
func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws storage.Warnings, err error) { func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws annotations.Annotations, err error) {
defer ev.recover(expr, &ws, &err) defer ev.recover(expr, &ws, &err)
v, ws = ev.eval(expr) v, ws = ev.eval(expr)
@ -1110,19 +1111,19 @@ func (enh *EvalNodeHelper) DropMetricName(l labels.Labels) labels.Labels {
// function call results. // function call results.
// The prepSeries function (if provided) can be used to prepare the helper // The prepSeries function (if provided) can be used to prepare the helper
// for each series, then passed to each call funcCall. // for each series, then passed to each call funcCall.
func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, storage.Warnings), exprs ...parser.Expr) (Matrix, storage.Warnings) { func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) {
numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1
matrixes := make([]Matrix, len(exprs)) matrixes := make([]Matrix, len(exprs))
origMatrixes := make([]Matrix, len(exprs)) origMatrixes := make([]Matrix, len(exprs))
originalNumSamples := ev.currentSamples originalNumSamples := ev.currentSamples
var warnings storage.Warnings var warnings annotations.Annotations
for i, e := range exprs { for i, e := range exprs {
// Functions will take string arguments from the expressions, not the values. // Functions will take string arguments from the expressions, not the values.
if e != nil && e.Type() != parser.ValueTypeString { if e != nil && e.Type() != parser.ValueTypeString {
// ev.currentSamples will be updated to the correct value within the ev.eval call. // ev.currentSamples will be updated to the correct value within the ev.eval call.
val, ws := ev.eval(e) val, ws := ev.eval(e)
warnings = append(warnings, ws...) warnings.Merge(ws)
matrixes[i] = val.(Matrix) matrixes[i] = val.(Matrix)
// Keep a copy of the original point slices so that they // Keep a copy of the original point slices so that they
@ -1234,7 +1235,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
enh.Ts = ts enh.Ts = ts
result, ws := funcCall(args, bufHelpers, enh) result, ws := funcCall(args, bufHelpers, enh)
enh.Out = result[:0] // Reuse result vector. enh.Out = result[:0] // Reuse result vector.
warnings = append(warnings, ws...) warnings.Merge(ws)
ev.currentSamples += len(result) ev.currentSamples += len(result)
// When we reset currentSamples to tempNumSamples during the next iteration of the loop it also // When we reset currentSamples to tempNumSamples during the next iteration of the loop it also
@ -1311,7 +1312,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
// evalSubquery evaluates given SubqueryExpr and returns an equivalent // evalSubquery evaluates given SubqueryExpr and returns an equivalent
// evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set. // evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set.
func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, storage.Warnings) { func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, annotations.Annotations) {
samplesStats := ev.samplesStats samplesStats := ev.samplesStats
// Avoid double counting samples when running a subquery, those samples will be counted in later stage. // Avoid double counting samples when running a subquery, those samples will be counted in later stage.
ev.samplesStats = ev.samplesStats.NewChild() ev.samplesStats = ev.samplesStats.NewChild()
@ -1344,7 +1345,7 @@ func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSele
} }
// eval evaluates the given expression as the given AST expression node requires. // eval evaluates the given expression as the given AST expression node requires.
func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotations) {
// This is the top-level evaluation method. // This is the top-level evaluation method.
// Thus, we check for timeout/cancellation here. // Thus, we check for timeout/cancellation here.
if err := contextDone(ev.ctx, "expression evaluation"); err != nil { if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
@ -1373,17 +1374,17 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
param := unwrapStepInvariantExpr(e.Param) param := unwrapStepInvariantExpr(e.Param)
unwrapParenExpr(&param) unwrapParenExpr(&param)
if s, ok := param.(*parser.StringLiteral); ok { if s, ok := param.(*parser.StringLiteral); ok {
return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return ev.aggregation(e.Op, sortedGrouping, e.Without, s.Val, v[0].(Vector), sh[0], enh), nil return ev.aggregation(e, sortedGrouping, s.Val, v[0].(Vector), sh[0], enh)
}, e.Expr) }, e.Expr)
} }
return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
var param float64 var param float64
if e.Param != nil { if e.Param != nil {
param = v[0].(Vector)[0].F param = v[0].(Vector)[0].F
} }
return ev.aggregation(e.Op, sortedGrouping, e.Without, param, v[1].(Vector), sh[1], enh), nil return ev.aggregation(e, sortedGrouping, param, v[1].(Vector), sh[1], enh)
}, e.Param, e.Expr) }, e.Param, e.Expr)
case *parser.Call: case *parser.Call:
@ -1405,7 +1406,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
var ( var (
matrixArgIndex int matrixArgIndex int
matrixArg bool matrixArg bool
warnings storage.Warnings warnings annotations.Annotations
) )
for i := range e.Args { for i := range e.Args {
unwrapParenExpr(&e.Args[i]) unwrapParenExpr(&e.Args[i])
@ -1423,7 +1424,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
// Replacing parser.SubqueryExpr with parser.MatrixSelector. // Replacing parser.SubqueryExpr with parser.MatrixSelector.
val, totalSamples, ws := ev.evalSubquery(subq) val, totalSamples, ws := ev.evalSubquery(subq)
e.Args[i] = val e.Args[i] = val
warnings = append(warnings, ws...) warnings.Merge(ws)
defer func() { defer func() {
// subquery result takes space in the memory. Get rid of that at the end. // subquery result takes space in the memory. Get rid of that at the end.
val.VectorSelector.(*parser.VectorSelector).Series = nil val.VectorSelector.(*parser.VectorSelector).Series = nil
@ -1434,8 +1435,9 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
} }
if !matrixArg { if !matrixArg {
// Does not have a matrix argument. // Does not have a matrix argument.
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return call(v, e.Args, enh), warnings vec, annos := call(v, e.Args, enh)
return vec, warnings.Merge(annos)
}, e.Args...) }, e.Args...)
} }
@ -1449,7 +1451,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
otherArgs[i] = val.(Matrix) otherArgs[i] = val.(Matrix)
otherInArgs[i] = Vector{Sample{}} otherInArgs[i] = Vector{Sample{}}
inArgs[i] = otherInArgs[i] inArgs[i] = otherInArgs[i]
warnings = append(warnings, ws...) warnings.Merge(ws)
} }
} }
@ -1460,7 +1462,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
selVS := sel.VectorSelector.(*parser.VectorSelector) selVS := sel.VectorSelector.(*parser.VectorSelector)
ws, err := checkAndExpandSeriesSet(ev.ctx, sel) ws, err := checkAndExpandSeriesSet(ev.ctx, sel)
warnings = append(warnings, ws...) warnings.Merge(ws)
if err != nil { if err != nil {
ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), warnings}) ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), warnings})
} }
@ -1523,8 +1525,10 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
inMatrix[0].Histograms = histograms inMatrix[0].Histograms = histograms
enh.Ts = ts enh.Ts = ts
// Make the function call. // Make the function call.
outVec := call(inArgs, e.Args, enh) outVec, annos := call(inArgs, e.Args, enh)
warnings.Merge(annos)
ev.samplesStats.IncrementSamplesAtStep(step, int64(len(floats)+len(histograms))) ev.samplesStats.IncrementSamplesAtStep(step, int64(len(floats)+len(histograms)))
enh.Out = outVec[:0] enh.Out = outVec[:0]
if len(outVec) > 0 { if len(outVec) > 0 {
if outVec[0].H == nil { if outVec[0].H == nil {
@ -1627,7 +1631,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
case *parser.BinaryExpr: case *parser.BinaryExpr:
switch lt, rt := e.LHS.Type(), e.RHS.Type(); { switch lt, rt := e.LHS.Type(), e.RHS.Type(); {
case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar: case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar:
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
val := scalarBinop(e.Op, v[0].(Vector)[0].F, v[1].(Vector)[0].F) val := scalarBinop(e.Op, v[0].(Vector)[0].F, v[1].(Vector)[0].F)
return append(enh.Out, Sample{F: val}), nil return append(enh.Out, Sample{F: val}), nil
}, e.LHS, e.RHS) }, e.LHS, e.RHS)
@ -1640,36 +1644,36 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
} }
switch e.Op { switch e.Op {
case parser.LAND: case parser.LAND:
return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil
}, e.LHS, e.RHS) }, e.LHS, e.RHS)
case parser.LOR: case parser.LOR:
return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil
}, e.LHS, e.RHS) }, e.LHS, e.RHS)
case parser.LUNLESS: case parser.LUNLESS:
return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil
}, e.LHS, e.RHS) }, e.LHS, e.RHS)
default: default:
return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh), nil return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh), nil
}, e.LHS, e.RHS) }, e.LHS, e.RHS)
} }
case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar:
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh), nil return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh), nil
}, e.LHS, e.RHS) }, e.LHS, e.RHS)
case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector:
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh), nil return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh), nil
}, e.LHS, e.RHS) }, e.LHS, e.RHS)
} }
case *parser.NumberLiteral: case *parser.NumberLiteral:
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil
}) })
@ -1835,7 +1839,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
panic(fmt.Errorf("unhandled expression of type: %T", expr)) panic(fmt.Errorf("unhandled expression of type: %T", expr))
} }
func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, storage.Warnings) { func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, annotations.Annotations) {
ws, err := checkAndExpandSeriesSet(ev.ctx, vs) ws, err := checkAndExpandSeriesSet(ev.ctx, vs)
if err != nil { if err != nil {
ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws})
@ -1847,7 +1851,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec
seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)) seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta))
} }
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
if vs.Timestamp != nil { if vs.Timestamp != nil {
// This is a special case for "timestamp()" when the @ modifier is used, to ensure that // This is a special case for "timestamp()" when the @ modifier is used, to ensure that
// we return a point for each time step in this case. // we return a point for each time step in this case.
@ -1875,7 +1879,8 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec
} }
} }
ev.samplesStats.UpdatePeak(ev.currentSamples) ev.samplesStats.UpdatePeak(ev.currentSamples)
return call([]parser.Value{vec}, e.Args, enh), ws vec, annos := call([]parser.Value{vec}, e.Args, enh)
return vec, ws.Merge(annos)
}) })
} }
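
Note: every call() site above gained a second return value, so the FunctionCall callback type itself must now return annotations alongside the result vector. Its implied shape (the actual definition lives in functions.go, outside the lines shown here):

	type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations)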
@ -1946,7 +1951,7 @@ func putHPointSlice(p []HPoint) {
} }
// matrixSelector evaluates a *parser.MatrixSelector expression. // matrixSelector evaluates a *parser.MatrixSelector expression.
func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storage.Warnings) { func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annotations.Annotations) {
var ( var (
vs = node.VectorSelector.(*parser.VectorSelector) vs = node.VectorSelector.(*parser.VectorSelector)
@ -2526,7 +2531,10 @@ type groupedAggregation struct {
// aggregation evaluates an aggregation operation on a Vector. The provided grouping labels // aggregation evaluates an aggregation operation on a Vector. The provided grouping labels
// must be sorted. // must be sorted.
func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without bool, param interface{}, vec Vector, seriesHelper []EvalSeriesHelper, enh *EvalNodeHelper) Vector { func (ev *evaluator) aggregation(e *parser.AggregateExpr, grouping []string, param interface{}, vec Vector, seriesHelper []EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
op := e.Op
without := e.Without
annos := annotations.Annotations{}
result := map[uint64]*groupedAggregation{} result := map[uint64]*groupedAggregation{}
orderedResult := []*groupedAggregation{} orderedResult := []*groupedAggregation{}
var k int64 var k int64
@ -2537,7 +2545,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
} }
k = int64(f) k = int64(f)
if k < 1 { if k < 1 {
return Vector{} return Vector{}, annos
} }
} }
var q float64 var q float64
@ -2790,7 +2798,8 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
case parser.AVG: case parser.AVG:
if aggr.hasFloat && aggr.hasHistogram { if aggr.hasFloat && aggr.hasHistogram {
// We cannot aggregate histogram sample with a float64 sample. // We cannot aggregate histogram sample with a float64 sample.
// TODO(zenador): Issue warning when plumbing is in place. metricName := aggr.labels.Get(labels.MetricName)
annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, e.Expr.PositionRange()))
continue continue
} }
if aggr.hasHistogram { if aggr.hasHistogram {
@ -2835,12 +2844,16 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
continue // Bypass default append. continue // Bypass default append.
case parser.QUANTILE: case parser.QUANTILE:
if math.IsNaN(q) || q < 0 || q > 1 {
annos.Add(annotations.NewInvalidQuantileWarning(q, e.Param.PositionRange()))
}
aggr.floatValue = quantile(q, aggr.heap) aggr.floatValue = quantile(q, aggr.heap)
case parser.SUM: case parser.SUM:
if aggr.hasFloat && aggr.hasHistogram { if aggr.hasFloat && aggr.hasHistogram {
// We cannot aggregate histogram sample with a float64 sample. // We cannot aggregate histogram sample with a float64 sample.
// TODO(zenador): Issue warning when plumbing is in place. metricName := aggr.labels.Get(labels.MetricName)
annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, e.Expr.PositionRange()))
continue continue
} }
if aggr.hasHistogram { if aggr.hasHistogram {
@ -2856,7 +2869,7 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
H: aggr.histogramValue, H: aggr.histogramValue,
}) })
} }
return enh.Out return enh.Out, annos
} }
// groupingKey builds and returns the grouping key for the given metric and // groupingKey builds and returns the grouping key for the given metric and
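
Note: with annotations plumbed through, the two former TODO(zenador) comments become user-visible warnings that carry the offending expression's position. Condensed from the hunks above (all names are exactly those in the diff):

	// sum/avg cannot mix float and histogram samples; warn and skip the group:
	if aggr.hasFloat && aggr.hasHistogram {
		metricName := aggr.labels.Get(labels.MetricName)
		annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, e.Expr.PositionRange()))
		continue
	}

	// quantile() with q outside [0, 1] still evaluates, but now warns:
	if math.IsNaN(q) || q < 0 || q > 1 {
		annos.Add(annotations.NewInvalidQuantileWarning(q, e.Param.PositionRange()))
	}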

View file

@ -33,8 +33,10 @@ import (
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/parser/posrange"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/util/annotations"
"github.com/prometheus/prometheus/util/stats" "github.com/prometheus/prometheus/util/stats"
"github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/teststorage"
) )
@ -195,15 +197,15 @@ type errQuerier struct {
err error err error
} }
func (q *errQuerier) Select(bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet { func (q *errQuerier) Select(context.Context, bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet {
return errSeriesSet{err: q.err} return errSeriesSet{err: q.err}
} }
func (*errQuerier) LabelValues(string, ...*labels.Matcher) ([]string, storage.Warnings, error) { func (*errQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil return nil, nil, nil
} }
func (*errQuerier) LabelNames(...*labels.Matcher) ([]string, storage.Warnings, error) { func (*errQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil return nil, nil, nil
} }
func (*errQuerier) Close() error { return nil } func (*errQuerier) Close() error { return nil }
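
Note: taken together, the test doubles above spell out the reshaped storage interfaces: a Querier is now built from just a time range, and the context travels with each read operation instead. A minimal no-op implementation under those signatures (nopQuerier is a name invented here; storage.EmptySeriesSet is assumed from the storage package):

	type nopQuerier struct{}

	func (nopQuerier) Select(_ context.Context, _ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet {
		return storage.EmptySeriesSet()
	}

	func (nopQuerier) LabelValues(_ context.Context, _ string, _ ...*labels.Matcher) ([]string, annotations.Annotations, error) {
		return nil, nil, nil
	}

	func (nopQuerier) LabelNames(_ context.Context, _ ...*labels.Matcher) ([]string, annotations.Annotations, error) {
		return nil, nil, nil
	}

	func (nopQuerier) Close() error { return nil }

	// Construction mirrors the QueryableFunc change above: no context at build time.
	var _ storage.Queryable = storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) {
		return nopQuerier{}, nil
	})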
@ -213,10 +215,10 @@ type errSeriesSet struct {
err error err error
} }
func (errSeriesSet) Next() bool { return false } func (errSeriesSet) Next() bool { return false }
func (errSeriesSet) At() storage.Series { return nil } func (errSeriesSet) At() storage.Series { return nil }
func (e errSeriesSet) Err() error { return e.err } func (e errSeriesSet) Err() error { return e.err }
func (e errSeriesSet) Warnings() storage.Warnings { return nil } func (e errSeriesSet) Warnings() annotations.Annotations { return nil }
func TestQueryError(t *testing.T) { func TestQueryError(t *testing.T) {
opts := EngineOpts{ opts := EngineOpts{
@ -227,7 +229,7 @@ func TestQueryError(t *testing.T) {
} }
engine := NewEngine(opts) engine := NewEngine(opts)
errStorage := ErrStorage{errors.New("storage error")} errStorage := ErrStorage{errors.New("storage error")}
queryable := storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { queryable := storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) {
return &errQuerier{err: errStorage}, nil return &errQuerier{err: errStorage}, nil
}) })
ctx, cancelCtx := context.WithCancel(context.Background()) ctx, cancelCtx := context.WithCancel(context.Background())
@ -252,7 +254,7 @@ type noopHintRecordingQueryable struct {
hints []*storage.SelectHints hints []*storage.SelectHints
} }
func (h *noopHintRecordingQueryable) Querier(context.Context, int64, int64) (storage.Querier, error) { func (h *noopHintRecordingQueryable) Querier(int64, int64) (storage.Querier, error) {
return &hintRecordingQuerier{Querier: &errQuerier{}, h: h}, nil return &hintRecordingQuerier{Querier: &errQuerier{}, h: h}, nil
} }
@ -262,9 +264,9 @@ type hintRecordingQuerier struct {
h *noopHintRecordingQueryable h *noopHintRecordingQueryable
} }
func (h *hintRecordingQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { func (h *hintRecordingQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
h.h.hints = append(h.h.hints, hints) h.h.hints = append(h.h.hints, hints)
return h.Querier.Select(sortSeries, hints, matchers...) return h.Querier.Select(ctx, sortSeries, hints, matchers...)
} }
func TestSelectHintsSetCorrectly(t *testing.T) { func TestSelectHintsSetCorrectly(t *testing.T) {
@ -1676,9 +1678,9 @@ func TestRecoverEvaluatorError(t *testing.T) {
func TestRecoverEvaluatorErrorWithWarnings(t *testing.T) { func TestRecoverEvaluatorErrorWithWarnings(t *testing.T) {
ev := &evaluator{logger: log.NewNopLogger()} ev := &evaluator{logger: log.NewNopLogger()}
var err error var err error
var ws storage.Warnings var ws annotations.Annotations
warnings := storage.Warnings{errors.New("custom warning")} warnings := annotations.New().Add(errors.New("custom warning"))
e := errWithWarnings{ e := errWithWarnings{
err: errors.New("custom error"), err: errors.New("custom error"),
warnings: warnings, warnings: warnings,
@ -2147,7 +2149,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
expected: &parser.StepInvariantExpr{ expected: &parser.StepInvariantExpr{
Expr: &parser.NumberLiteral{ Expr: &parser.NumberLiteral{
Val: 123.4567, Val: 123.4567,
PosRange: parser.PositionRange{Start: 0, End: 8}, PosRange: posrange.PositionRange{Start: 0, End: 8},
}, },
}, },
}, },
@ -2156,7 +2158,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
expected: &parser.StepInvariantExpr{ expected: &parser.StepInvariantExpr{
Expr: &parser.StringLiteral{ Expr: &parser.StringLiteral{
Val: "foo", Val: "foo",
PosRange: parser.PositionRange{Start: 0, End: 5}, PosRange: posrange.PositionRange{Start: 0, End: 5},
}, },
}, },
}, },
@ -2169,7 +2171,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 3, End: 3,
}, },
@ -2179,7 +2181,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "bar"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "bar"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 6, Start: 6,
End: 9, End: 9,
}, },
@ -2196,7 +2198,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 3, End: 3,
}, },
@ -2207,7 +2209,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "bar"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "bar"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 6, Start: 6,
End: 14, End: 14,
}, },
@ -2227,7 +2229,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 8, End: 8,
}, },
@ -2238,7 +2240,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "bar"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "bar"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 11, Start: 11,
End: 19, End: 19,
}, },
@ -2256,7 +2258,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "test"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "test"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 4, End: 4,
}, },
@ -2276,7 +2278,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
parser.MustLabelMatcher(labels.MatchEqual, "a", "b"), parser.MustLabelMatcher(labels.MatchEqual, "a", "b"),
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "test"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "test"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 11, End: 11,
}, },
@ -2295,13 +2297,13 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 13, Start: 13,
End: 24, End: 24,
}, },
}, },
Grouping: []string{"foo"}, Grouping: []string{"foo"},
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 25, End: 25,
}, },
@ -2317,14 +2319,14 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 13, Start: 13,
End: 29, End: 29,
}, },
Timestamp: makeInt64Pointer(10000), Timestamp: makeInt64Pointer(10000),
}, },
Grouping: []string{"foo"}, Grouping: []string{"foo"},
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 30, End: 30,
}, },
@ -2344,13 +2346,13 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric1"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric1"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 4, Start: 4,
End: 21, End: 21,
}, },
Timestamp: makeInt64Pointer(10000), Timestamp: makeInt64Pointer(10000),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 22, End: 22,
}, },
@ -2362,13 +2364,13 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric2"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric2"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 29, Start: 29,
End: 46, End: 46,
}, },
Timestamp: makeInt64Pointer(20000), Timestamp: makeInt64Pointer(20000),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 25, Start: 25,
End: 47, End: 47,
}, },
@ -2388,7 +2390,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 11, End: 11,
}, },
@ -2405,7 +2407,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 29, Start: 29,
End: 40, End: 40,
}, },
@ -2415,19 +2417,19 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
EndPos: 49, EndPos: 49,
}, },
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 24, Start: 24,
End: 50, End: 50,
}, },
}, },
Param: &parser.NumberLiteral{ Param: &parser.NumberLiteral{
Val: 5, Val: 5,
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 21, Start: 21,
End: 22, End: 22,
}, },
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 16, Start: 16,
End: 51, End: 51,
}, },
@ -2440,7 +2442,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
expected: &parser.Call{ expected: &parser.Call{
Func: parser.MustGetFunction("time"), Func: parser.MustGetFunction("time"),
Args: parser.Expressions{}, Args: parser.Expressions{},
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 6, End: 6,
}, },
@ -2455,7 +2457,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
parser.MustLabelMatcher(labels.MatchEqual, "bar", "baz"), parser.MustLabelMatcher(labels.MatchEqual, "bar", "baz"),
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 14, End: 14,
}, },
@ -2475,7 +2477,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
parser.MustLabelMatcher(labels.MatchEqual, "bar", "baz"), parser.MustLabelMatcher(labels.MatchEqual, "bar", "baz"),
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 14, End: 14,
}, },
@ -2500,13 +2502,13 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
parser.MustLabelMatcher(labels.MatchEqual, "bar", "baz"), parser.MustLabelMatcher(labels.MatchEqual, "bar", "baz"),
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 4, Start: 4,
End: 23, End: 23,
}, },
Timestamp: makeInt64Pointer(20000), Timestamp: makeInt64Pointer(20000),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 24, End: 24,
}, },
@ -2537,7 +2539,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
parser.MustLabelMatcher(labels.MatchEqual, "bar", "baz"), parser.MustLabelMatcher(labels.MatchEqual, "bar", "baz"),
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 19, Start: 19,
End: 33, End: 33,
}, },
@ -2546,7 +2548,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
EndPos: 37, EndPos: 37,
}, },
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 14, Start: 14,
End: 38, End: 38,
}, },
@ -2556,7 +2558,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
EndPos: 56, EndPos: 56,
}, },
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 57, End: 57,
}, },
@ -2576,7 +2578,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 27, End: 27,
}, },
@ -2598,7 +2600,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 11, End: 11,
}, },
@ -2626,7 +2628,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 1, Start: 1,
End: 4, End: 4,
}, },
@ -2639,14 +2641,14 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "bar"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "bar"),
}, },
Timestamp: makeInt64Pointer(1234000), Timestamp: makeInt64Pointer(1234000),
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 7, Start: 7,
End: 27, End: 27,
}, },
}, },
}, },
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 28, End: 28,
}, },
@ -2677,18 +2679,18 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "metric"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "metric"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 8, Start: 8,
End: 19, End: 19,
}, },
Timestamp: makeInt64Pointer(10000), Timestamp: makeInt64Pointer(10000),
}}, }},
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 4, Start: 4,
End: 20, End: 20,
}, },
}}, }},
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 21, End: 21,
}, },
@ -2710,13 +2712,13 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric1"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric1"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 8, Start: 8,
End: 25, End: 25,
}, },
Timestamp: makeInt64Pointer(10000), Timestamp: makeInt64Pointer(10000),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 4, Start: 4,
End: 26, End: 26,
}, },
@ -2728,19 +2730,19 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric2"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric2"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 33, Start: 33,
End: 50, End: 50,
}, },
Timestamp: makeInt64Pointer(20000), Timestamp: makeInt64Pointer(20000),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 29, Start: 29,
End: 52, End: 52,
}, },
}, },
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 52, End: 52,
}, },
@ -2755,7 +2757,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 13, End: 13,
}, },
@ -2772,7 +2774,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "foo"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 11, End: 11,
}, },
@ -2792,7 +2794,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "test"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "test"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 4, End: 4,
}, },
@ -2813,7 +2815,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "test"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "test"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 4, End: 4,
}, },
@ -2832,7 +2834,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 11, End: 11,
}, },
@ -2854,7 +2856,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 11, End: 11,
}, },
@ -2884,7 +2886,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
LabelMatchers: []*labels.Matcher{ LabelMatchers: []*labels.Matcher{
parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"), parser.MustLabelMatcher(labels.MatchEqual, "__name__", "some_metric"),
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 6, Start: 6,
End: 17, End: 17,
}, },
@ -2895,20 +2897,20 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
Op: parser.MUL, Op: parser.MUL,
LHS: &parser.NumberLiteral{ LHS: &parser.NumberLiteral{
Val: 3, Val: 3,
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 21, Start: 21,
End: 22, End: 22,
}, },
}, },
RHS: &parser.NumberLiteral{ RHS: &parser.NumberLiteral{
Val: 1024, Val: 1024,
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 25, Start: 25,
End: 29, End: 29,
}, },
}, },
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 20, Start: 20,
End: 30, End: 30,
}, },
@ -2916,7 +2918,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
}, },
}, },
}, },
PosRange: parser.PositionRange{ PosRange: posrange.PositionRange{
Start: 0, Start: 0,
End: 31, End: 31,
}, },

File diff suppressed because it is too large

View file

@ -20,6 +20,8 @@ import (
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/promql/parser/posrange"
) )
// Node is a generic interface for all nodes in an AST. // Node is a generic interface for all nodes in an AST.
@ -45,7 +47,7 @@ type Node interface {
Pretty(level int) string Pretty(level int) string
// PositionRange returns the position of the AST Node in the query string. // PositionRange returns the position of the AST Node in the query string.
PositionRange() PositionRange PositionRange() posrange.PositionRange
} }
// Statement is a generic interface for all statements. // Statement is a generic interface for all statements.
@ -94,7 +96,7 @@ type AggregateExpr struct {
Param Expr // Parameter used by some aggregators. Param Expr // Parameter used by some aggregators.
Grouping []string // The labels by which to group the Vector. Grouping []string // The labels by which to group the Vector.
Without bool // Whether to drop the given labels rather than keep them. Without bool // Whether to drop the given labels rather than keep them.
PosRange PositionRange PosRange posrange.PositionRange
} }
// BinaryExpr represents a binary expression between two child expressions. // BinaryExpr represents a binary expression between two child expressions.
@ -115,7 +117,7 @@ type Call struct {
Func *Function // The function that was called. Func *Function // The function that was called.
Args Expressions // Arguments used in the call. Args Expressions // Arguments used in the call.
PosRange PositionRange PosRange posrange.PositionRange
} }
// MatrixSelector represents a Matrix selection. // MatrixSelector represents a Matrix selection.
@ -125,7 +127,7 @@ type MatrixSelector struct {
VectorSelector Expr VectorSelector Expr
Range time.Duration Range time.Duration
EndPos Pos EndPos posrange.Pos
} }
// SubqueryExpr represents a subquery. // SubqueryExpr represents a subquery.
@ -143,27 +145,27 @@ type SubqueryExpr struct {
StartOrEnd ItemType // Set when @ is used with start() or end() StartOrEnd ItemType // Set when @ is used with start() or end()
Step time.Duration Step time.Duration
EndPos Pos EndPos posrange.Pos
} }
// NumberLiteral represents a number.
type NumberLiteral struct {
	Val float64

-	PosRange PositionRange
+	PosRange posrange.PositionRange
}

// ParenExpr wraps an expression so it cannot be disassembled as a consequence
// of operator precedence.
type ParenExpr struct {
	Expr Expr

-	PosRange PositionRange
+	PosRange posrange.PositionRange
}

// StringLiteral represents a string.
type StringLiteral struct {
	Val string

-	PosRange PositionRange
+	PosRange posrange.PositionRange
}

// UnaryExpr represents a unary operation on another expression.
@@ -172,7 +174,7 @@ type UnaryExpr struct {
	Op   ItemType
	Expr Expr

-	StartPos Pos
+	StartPos posrange.Pos
}

// StepInvariantExpr represents a query which evaluates to the same result
@@ -184,7 +186,9 @@ type StepInvariantExpr struct {
func (e *StepInvariantExpr) String() string { return e.Expr.String() }

-func (e *StepInvariantExpr) PositionRange() PositionRange { return e.Expr.PositionRange() }
+func (e *StepInvariantExpr) PositionRange() posrange.PositionRange {
+	return e.Expr.PositionRange()
+}

// VectorSelector represents a Vector selection.
type VectorSelector struct {
@@ -204,7 +208,7 @@ type VectorSelector struct {
	UnexpandedSeriesSet storage.SeriesSet
	Series              []storage.Series

-	PosRange PositionRange
+	PosRange posrange.PositionRange
}

// TestStmt is an internal helper statement that allows execution
@@ -215,8 +219,8 @@ func (TestStmt) String() string { return "test statement" }
func (TestStmt) PromQLStmt() {}
func (t TestStmt) Pretty(int) string { return t.String() }

-func (TestStmt) PositionRange() PositionRange {
-	return PositionRange{
+func (TestStmt) PositionRange() posrange.PositionRange {
+	return posrange.PositionRange{
		Start: -1,
		End:   -1,
	}
@@ -405,17 +409,11 @@ func Children(node Node) []Node {
	}
}

-// PositionRange describes a position in the input string of the parser.
-type PositionRange struct {
-	Start Pos
-	End   Pos
-}
-
// mergeRanges is a helper function to merge the PositionRanges of two Nodes.
// Note that the arguments must be in the same order as they
// occur in the input string.
-func mergeRanges(first, last Node) PositionRange {
-	return PositionRange{
+func mergeRanges(first, last Node) posrange.PositionRange {
+	return posrange.PositionRange{
		Start: first.PositionRange().Start,
		End:   last.PositionRange().End,
	}
@@ -423,33 +421,33 @@ func mergeRanges(first, last Node) PositionRange {
// Item implements the Node interface.
// This makes it possible to call mergeRanges on them.
-func (i *Item) PositionRange() PositionRange {
-	return PositionRange{
+func (i *Item) PositionRange() posrange.PositionRange {
+	return posrange.PositionRange{
		Start: i.Pos,
-		End:   i.Pos + Pos(len(i.Val)),
+		End:   i.Pos + posrange.Pos(len(i.Val)),
	}
}

-func (e *AggregateExpr) PositionRange() PositionRange {
+func (e *AggregateExpr) PositionRange() posrange.PositionRange {
	return e.PosRange
}

-func (e *BinaryExpr) PositionRange() PositionRange {
+func (e *BinaryExpr) PositionRange() posrange.PositionRange {
	return mergeRanges(e.LHS, e.RHS)
}

-func (e *Call) PositionRange() PositionRange {
+func (e *Call) PositionRange() posrange.PositionRange {
	return e.PosRange
}

-func (e *EvalStmt) PositionRange() PositionRange {
+func (e *EvalStmt) PositionRange() posrange.PositionRange {
	return e.Expr.PositionRange()
}

-func (e Expressions) PositionRange() PositionRange {
+func (e Expressions) PositionRange() posrange.PositionRange {
	if len(e) == 0 {
		// Position undefined.
-		return PositionRange{
+		return posrange.PositionRange{
			Start: -1,
			End:   -1,
		}
@@ -457,39 +455,39 @@ func (e Expressions) PositionRange() PositionRange {
	return mergeRanges(e[0], e[len(e)-1])
}

-func (e *MatrixSelector) PositionRange() PositionRange {
-	return PositionRange{
+func (e *MatrixSelector) PositionRange() posrange.PositionRange {
+	return posrange.PositionRange{
		Start: e.VectorSelector.PositionRange().Start,
		End:   e.EndPos,
	}
}

-func (e *SubqueryExpr) PositionRange() PositionRange {
-	return PositionRange{
+func (e *SubqueryExpr) PositionRange() posrange.PositionRange {
+	return posrange.PositionRange{
		Start: e.Expr.PositionRange().Start,
		End:   e.EndPos,
	}
}

-func (e *NumberLiteral) PositionRange() PositionRange {
+func (e *NumberLiteral) PositionRange() posrange.PositionRange {
	return e.PosRange
}

-func (e *ParenExpr) PositionRange() PositionRange {
+func (e *ParenExpr) PositionRange() posrange.PositionRange {
	return e.PosRange
}

-func (e *StringLiteral) PositionRange() PositionRange {
+func (e *StringLiteral) PositionRange() posrange.PositionRange {
	return e.PosRange
}

-func (e *UnaryExpr) PositionRange() PositionRange {
-	return PositionRange{
+func (e *UnaryExpr) PositionRange() posrange.PositionRange {
+	return posrange.PositionRange{
		Start: e.StartPos,
		End:   e.Expr.PositionRange().End,
	}
}

-func (e *VectorSelector) PositionRange() PositionRange {
+func (e *VectorSelector) PositionRange() posrange.PositionRange {
	return e.PosRange
}
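Taken together, the AST changes above swap the parser-local PositionRange and Pos types for the shared posrange equivalents without changing behaviour: positions are still byte offsets into the query string. A minimal sketch of reading a node's position after this change, assuming parser.ParseExpr remains the public entry point:

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/promql/parser"
    )

    func main() {
        expr, err := parser.ParseExpr(`sum(rate(http_requests_total[5m]))`)
        if err != nil {
            panic(err)
        }
        // PositionRange now returns a posrange.PositionRange; Start and End
        // are byte offsets (posrange.Pos) into the original query string.
        r := expr.PositionRange()
        fmt.Printf("expression spans bytes %d..%d\n", r.Start, r.End)
    }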


@@ -22,6 +22,7 @@ import (
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/value"
	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/promql/parser/posrange"
)
%}
@@ -199,7 +200,7 @@ start :
			{ yylex.(*parser).generatedParserResult = $2 }
	| START_SERIES_DESCRIPTION series_description
	| START_EXPRESSION /* empty */ EOF
-			{ yylex.(*parser).addParseErrf(PositionRange{}, "no expression found in input")}
+			{ yylex.(*parser).addParseErrf(posrange.PositionRange{}, "no expression found in input")}
	| START_EXPRESSION expr
			{ yylex.(*parser).generatedParserResult = $2 }
	| START_METRIC_SELECTOR vector_selector
@@ -371,7 +372,7 @@ function_call : IDENTIFIER function_call_body
			$$ = &Call{
				Func: fn,
				Args: $2.(Expressions),
-				PosRange: PositionRange{
+				PosRange: posrange.PositionRange{
					Start: $1.Pos,
					End:   yylex.(*parser).lastClosing,
				},

File diff suppressed because it is too large


@@ -19,13 +19,15 @@ import (
	"strings"
	"unicode"
	"unicode/utf8"
+
+	"github.com/prometheus/prometheus/promql/parser/posrange"
)

// Item represents a token or text string returned from the scanner.
type Item struct {
	Typ ItemType // The type of this Item.
-	Pos Pos      // The starting position, in bytes, of this Item in the input string.
+	Pos posrange.Pos // The starting position, in bytes, of this Item in the input string.
	Val string // The value of this Item.
}

// String returns a descriptive string for the Item.
@@ -234,10 +236,6 @@ const eof = -1
// stateFn represents the state of the scanner as a function that returns the next state.
type stateFn func(*Lexer) stateFn

-// Pos is the position in a string.
-// Negative numbers indicate undefined positions.
-type Pos int
-
type histogramState int

const (
@@ -250,14 +248,14 @@ const (
// Lexer holds the state of the scanner.
type Lexer struct {
	input string  // The string being scanned.
	state stateFn // The next lexing function to enter.
-	pos     Pos // Current position in the input.
-	start   Pos // Start position of this Item.
-	width   Pos // Width of last rune read from input.
-	lastPos Pos // Position of most recent Item returned by NextItem.
+	pos     posrange.Pos // Current position in the input.
+	start   posrange.Pos // Start position of this Item.
+	width   posrange.Pos // Width of last rune read from input.
+	lastPos posrange.Pos // Position of most recent Item returned by NextItem.
	itemp       *Item // Pointer to where the next scanned item should be placed.
	scannedItem bool  // Set to true every time an item is scanned.
	parenDepth int  // Nesting depth of ( ) exprs.
	braceOpen  bool // Whether a { is opened.
@@ -278,7 +276,7 @@ func (l *Lexer) next() rune {
		return eof
	}
	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
-	l.width = Pos(w)
+	l.width = posrange.Pos(w)
	l.pos += l.width
	return r
}
@@ -827,7 +825,7 @@ func lexSpace(l *Lexer) stateFn {
// lexLineComment scans a line comment. Left comment marker is known to be present.
func lexLineComment(l *Lexer) stateFn {
-	l.pos += Pos(len(lineComment))
+	l.pos += posrange.Pos(len(lineComment))
	for r := l.next(); !isEndOfLine(r) && r != eof; {
		r = l.next()
	}
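Because posrange.Pos stays a plain integer byte offset, the lexer's pointer arithmetic survives the move unchanged; only the package qualifier differs. A small illustration with made-up values (not part of the diff):

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/promql/parser/posrange"
    )

    func main() {
        // An item's end offset is its start plus the byte length of its
        // text, mirroring how Item.PositionRange is computed in the parser.
        start := posrange.Pos(4)
        val := "rate"
        end := start + posrange.Pos(len(val))
        fmt.Println(start, end) // 4 8
    }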


@@ -17,6 +17,8 @@ import (
	"testing"

	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/promql/parser/posrange"
)

type testCase struct {
@@ -824,7 +826,7 @@ func TestLexer(t *testing.T) {
			require.Fail(t, "unexpected lexing error at position %d: %s", lastItem.Pos, lastItem)
		}

-		eofItem := Item{EOF, Pos(len(test.input)), ""}
+		eofItem := Item{EOF, posrange.Pos(len(test.input)), ""}
		require.Equal(t, lastItem, eofItem, "%d: input %q", i, test.input)

		out = out[:len(out)-1]


@@ -29,6 +29,7 @@ import (
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/value"
	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/promql/parser/posrange"
	"github.com/prometheus/prometheus/util/strutil"
)
@@ -54,7 +55,7 @@ type parser struct {
	// Everytime an Item is lexed that could be the end
	// of certain expressions its end position is stored here.
-	lastClosing Pos
+	lastClosing posrange.Pos

	yyParser yyParserImpl
@@ -121,7 +122,7 @@ func (p *parser) Close() {
// ParseErr wraps a parsing error with line and position context.
type ParseErr struct {
-	PositionRange PositionRange
+	PositionRange posrange.PositionRange
	Err           error
	Query         string
@@ -130,27 +131,7 @@ type ParseErr struct {
}

func (e *ParseErr) Error() string {
-	pos := int(e.PositionRange.Start)
-	lastLineBreak := -1
-	line := e.LineOffset + 1
-
-	var positionStr string
-
-	if pos < 0 || pos > len(e.Query) {
-		positionStr = "invalid position:"
-	} else {
-		for i, c := range e.Query[:pos] {
-			if c == '\n' {
-				lastLineBreak = i
-				line++
-			}
-		}
-
-		col := pos - lastLineBreak
-		positionStr = fmt.Sprintf("%d:%d:", line, col)
-	}
-	return fmt.Sprintf("%s parse error: %s", positionStr, e.Err)
+	return fmt.Sprintf("%s: parse error: %s", e.PositionRange.StartPosInput(e.Query, e.LineOffset), e.Err)
}

type ParseErrors []ParseErr
@@ -275,12 +256,12 @@ func ParseSeriesDesc(input string) (labels labels.Labels, values []SequenceValue
}

// addParseErrf formats the error and appends it to the list of parsing errors.
-func (p *parser) addParseErrf(positionRange PositionRange, format string, args ...interface{}) {
+func (p *parser) addParseErrf(positionRange posrange.PositionRange, format string, args ...interface{}) {
	p.addParseErr(positionRange, fmt.Errorf(format, args...))
}

// addParseErr appends the provided error to the list of parsing errors.
-func (p *parser) addParseErr(positionRange PositionRange, err error) {
+func (p *parser) addParseErr(positionRange posrange.PositionRange, err error) {
	perr := ParseErr{
		PositionRange: positionRange,
		Err:           err,
@@ -366,9 +347,9 @@ func (p *parser) Lex(lval *yySymType) int {
	switch typ {
	case ERROR:
-		pos := PositionRange{
+		pos := posrange.PositionRange{
			Start: p.lex.start,
-			End:   Pos(len(p.lex.input)),
+			End:   posrange.Pos(len(p.lex.input)),
		}
		p.addParseErr(pos, errors.New(p.yyParser.lval.item.Val))
@@ -378,7 +359,7 @@ func (p *parser) Lex(lval *yySymType) int {
		lval.item.Typ = EOF
		p.InjectItem(0)
	case RIGHT_BRACE, RIGHT_PAREN, RIGHT_BRACKET, DURATION, NUMBER:
-		p.lastClosing = lval.item.Pos + Pos(len(lval.item.Val))
+		p.lastClosing = lval.item.Pos + posrange.Pos(len(lval.item.Val))
	}

	return int(typ)
@@ -436,7 +417,7 @@ func (p *parser) newAggregateExpr(op Item, modifier, args Node) (ret *AggregateE
	ret = modifier.(*AggregateExpr)
	arguments := args.(Expressions)

-	ret.PosRange = PositionRange{
+	ret.PosRange = posrange.PositionRange{
		Start: op.Pos,
		End:   p.lastClosing,
	}
@@ -477,7 +458,7 @@ func (p *parser) newMap() (ret map[string]interface{}) {
func (p *parser) mergeMaps(left, right *map[string]interface{}) (ret *map[string]interface{}) {
	for key, value := range *right {
		if _, ok := (*left)[key]; ok {
-			p.addParseErrf(PositionRange{}, "duplicate key \"%s\" in histogram", key)
+			p.addParseErrf(posrange.PositionRange{}, "duplicate key \"%s\" in histogram", key)
			continue
		}
		(*left)[key] = value
@@ -677,7 +658,7 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
		// opRange returns the PositionRange of the operator part of the BinaryExpr.
		// This is made a function instead of a variable, so it is lazily evaluated on demand.
-		opRange := func() (r PositionRange) {
+		opRange := func() (r posrange.PositionRange) {
			// Remove whitespace at the beginning and end of the range.
			for r.Start = n.LHS.PositionRange().End; isSpace(rune(p.lex.input[r.Start])); r.Start++ { // nolint:revive
			}
@@ -881,7 +862,7 @@ func (p *parser) newLabelMatcher(label, operator, value Item) *labels.Matcher {
// addOffset is used to set the offset in the generated parser.
func (p *parser) addOffset(e Node, offset time.Duration) {
	var orgoffsetp *time.Duration
-	var endPosp *Pos
+	var endPosp *posrange.Pos

	switch s := e.(type) {
	case *VectorSelector:
@@ -921,7 +902,7 @@ func (p *parser) setTimestamp(e Node, ts float64) {
		p.addParseErrf(e.PositionRange(), "timestamp out of bounds for @ modifier: %f", ts)
	}
	var timestampp **int64
-	var endPosp *Pos
+	var endPosp *posrange.Pos

	timestampp, _, endPosp, ok := p.getAtModifierVars(e)
	if !ok {
@@ -950,11 +931,11 @@ func (p *parser) setAtModifierPreprocessor(e Node, op Item) {
	*endPosp = p.lastClosing
}

-func (p *parser) getAtModifierVars(e Node) (**int64, *ItemType, *Pos, bool) {
+func (p *parser) getAtModifierVars(e Node) (**int64, *ItemType, *posrange.Pos, bool) {
	var (
		timestampp **int64
		preprocp   *ItemType
-		endPosp    *Pos
+		endPosp    *posrange.Pos
	)
	switch s := e.(type) {
	case *VectorSelector:
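With Error delegating to StartPosInput, the line/column bookkeeping now lives in one place and the message shape becomes "line:col: parse error: cause". A quick sketch of observing this through the public API (the printed position is indicative, not taken verbatim from the diff):

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/promql/parser"
    )

    func main() {
        _, err := parser.ParseExpr(`sum(rate(`)
        if err != nil {
            // The message is now assembled via
            // posrange.PositionRange.StartPosInput, e.g. "1:10: parse error: ...".
            fmt.Println(err)
        }
    }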

File diff suppressed because it is too large


@@ -0,0 +1,54 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// posrange is used to report a position in query strings for error
+// and warning messages.
+package posrange
+
+import "fmt"
+
+// Pos is the position in a string.
+// Negative numbers indicate undefined positions.
+type Pos int
+
+// PositionRange describes a position in the input string of the parser.
+type PositionRange struct {
+	Start Pos
+	End   Pos
+}
+
+// StartPosInput uses the query string to convert the PositionRange into a
+// line:col string, indicating when this is not possible if the query is empty
+// or the position is invalid. When this is used to convert ParseErr to a string,
+// lineOffset is an additional line offset to be added, and is only used inside
+// unit tests.
+func (p PositionRange) StartPosInput(query string, lineOffset int) string {
+	if query == "" {
+		return "unknown position"
+	}
+	pos := int(p.Start)
+	if pos < 0 || pos > len(query) {
+		return "invalid position"
+	}
+
+	lastLineBreak := -1
+	line := lineOffset + 1
+	for i, c := range query[:pos] {
+		if c == '\n' {
+			lastLineBreak = i
+			line++
+		}
+	}
+	col := pos - lastLineBreak
+	return fmt.Sprintf("%d:%d", line, col)
+}
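The extracted helper is self-contained, so it can be exercised directly. A minimal check of the line:col conversion, including the newline and invalid-position handling:

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/promql/parser/posrange"
    )

    func main() {
        query := "up\nand\nbad query"
        r := posrange.PositionRange{Start: 7, End: 16}
        // Byte offset 7 sits just after the second newline: line 3, column 1.
        fmt.Println(r.StartPosInput(query, 0)) // 3:1
        // Negative positions are reported as undefined.
        fmt.Println(posrange.PositionRange{Start: -1}.StartPosInput(query, 0)) // invalid position
    }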


@@ -34,6 +34,7 @@ import (
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/timestamp"
	"github.com/prometheus/prometheus/promql/parser"
+	"github.com/prometheus/prometheus/promql/parser/posrange"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/util/teststorage"
	"github.com/prometheus/prometheus/util/testutil"
@@ -197,7 +198,7 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
	if err != nil {
		parser.EnrichParseError(err, func(parseErr *parser.ParseErr) {
			parseErr.LineOffset = i
-			posOffset := parser.Pos(strings.Index(lines[i], expr))
+			posOffset := posrange.Pos(strings.Index(lines[i], expr))
			parseErr.PositionRange.Start += posOffset
			parseErr.PositionRange.End += posOffset
			parseErr.Query = lines[i]


@@ -123,7 +123,7 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
			// Check the series.
			queryable := suite.Queryable()
-			querier, err := queryable.Querier(suite.Context(), math.MinInt64, math.MaxInt64)
+			querier, err := queryable.Querier(math.MinInt64, math.MaxInt64)
			require.NoError(t, err)
			for _, s := range tc.series {
				var matchers []*labels.Matcher
@@ -134,7 +134,7 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
				})

				// Get the series for the matcher.
-				ss := querier.Select(false, nil, matchers...)
+				ss := querier.Select(suite.Context(), false, nil, matchers...)
				require.True(t, ss.Next())
				storageSeries := ss.At()
				require.False(t, ss.Next(), "Expecting only 1 series")


@@ -24,8 +24,8 @@ import (
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/promql/parser"
-	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/util/annotations"
)

func (Matrix) Type() parser.ValueType { return parser.ValueTypeMatrix }
@@ -303,7 +303,7 @@ func (m Matrix) ContainsSameLabelset() bool {
type Result struct {
	Err   error
	Value parser.Value
-	Warnings storage.Warnings
+	Warnings annotations.Annotations
}

// Vector returns a Vector if the result value is one. An error is returned if
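Result.Warnings is now an annotations.Annotations, a map keyed by message, rather than a flat []error, so duplicate warnings collapse automatically. A sketch of producing and consuming one, relying on the AsErrors and AsStrings helpers the tests further down use (Add is assumed to follow the same pattern and is not shown in this diff):

    package main

    import (
        "errors"
        "fmt"

        "github.com/prometheus/prometheus/util/annotations"
    )

    func main() {
        var annos annotations.Annotations
        // Adding the same message twice keeps a single entry.
        annos = annos.Add(errors.New("query touched too many samples"))
        annos = annos.Add(errors.New("query touched too many samples"))

        fmt.Println(len(annos.AsErrors()))  // 1
        fmt.Println(annos.AsStrings("", 0)) // [query touched too many samples]
    }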


@@ -261,7 +261,7 @@ func (r *AlertingRule) forStateSample(alert *Alert, ts time.Time, v float64) pro
}

// QueryforStateSeries returns the series for ALERTS_FOR_STATE.
-func (r *AlertingRule) QueryforStateSeries(alert *Alert, q storage.Querier) (storage.Series, error) {
+func (r *AlertingRule) QueryforStateSeries(ctx context.Context, alert *Alert, q storage.Querier) (storage.Series, error) {
	smpl := r.forStateSample(alert, time.Now(), 0)
	var matchers []*labels.Matcher
	smpl.Metric.Range(func(l labels.Label) {
@@ -271,7 +271,7 @@ func (r *AlertingRule) QueryforStateSeries(alert *Alert, q storage.Querier) (sto
		}
		matchers = append(matchers, mt)
	})
-	sset := q.Select(false, nil, matchers...)
+	sset := q.Select(ctx, false, nil, matchers...)

	var s storage.Series
	for sset.Next() {
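Callers now have to thread a context through explicitly; the querier itself is built without one. A hedged sketch of the new call-site shape, mirroring the test and RestoreForState changes that follow:

    package example

    import (
        "context"

        "github.com/prometheus/prometheus/rules"
        "github.com/prometheus/prometheus/storage"
    )

    // restoreOne looks up the ALERTS_FOR_STATE series for one active alert,
    // passing the caller's context down to the storage layer.
    func restoreOne(ctx context.Context, rule *rules.AlertingRule, alert *rules.Alert, q storage.Querier) (storage.Series, error) {
        return rule.QueryforStateSeries(ctx, alert, q)
    }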


@@ -659,7 +659,7 @@ func TestQueryForStateSeries(t *testing.T) {
			ValidUntil: time.Time{},
		}

-		series, err := rule.QueryforStateSeries(alert, querier)
+		series, err := rule.QueryforStateSeries(context.Background(), alert, querier)

		require.Equal(t, tst.expectedSeries, series)
		require.Equal(t, tst.expectedError, err)


@@ -844,7 +844,7 @@ func (g *Group) RestoreForState(ts time.Time) {
	// We allow restoration only if alerts were active before after certain time.
	mint := ts.Add(-g.opts.OutageTolerance)
	mintMS := int64(model.TimeFromUnixNano(mint.UnixNano()))
-	q, err := g.opts.Queryable.Querier(g.opts.Context, mintMS, maxtMS)
+	q, err := g.opts.Queryable.Querier(mintMS, maxtMS)
	if err != nil {
		level.Error(g.logger).Log("msg", "Failed to get Querier", "err", err)
		return
@@ -873,7 +873,7 @@ func (g *Group) RestoreForState(ts time.Time) {
		alertRule.ForEachActiveAlert(func(a *Alert) {
			var s storage.Series

-			s, err := alertRule.QueryforStateSeries(a, q)
+			s, err := alertRule.QueryforStateSeries(g.opts.Context, a, q)
			if err != nil {
				// Querier Warnings are ignored. We do not care unless we have an error.
				level.Error(g.logger).Log(


@@ -572,14 +572,14 @@ func TestStaleness(t *testing.T) {
	group.Eval(ctx, time.Unix(1, 0).Add(evalDelay))
	group.Eval(ctx, time.Unix(2, 0).Add(evalDelay))

-	querier, err := st.Querier(context.Background(), 0, 2000)
+	querier, err := st.Querier(0, 2000)
	require.NoError(t, err)
	defer querier.Close()

	matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a_plus_one")
	require.NoError(t, err)

-	set := querier.Select(false, nil, matcher)
+	set := querier.Select(ctx, false, nil, matcher)
	samples, err := readSeriesSet(set)
	require.NoError(t, err)
@@ -696,14 +696,14 @@ func TestDeletedRuleMarkedStale(t *testing.T) {
	newGroup.Eval(context.Background(), time.Unix(0, 0))

-	querier, err := st.Querier(context.Background(), 0, 2000)
+	querier, err := st.Querier(0, 2000)
	require.NoError(t, err)
	defer querier.Close()

	matcher, err := labels.NewMatcher(labels.MatchEqual, "l1", "v1")
	require.NoError(t, err)

-	set := querier.Select(false, nil, matcher)
+	set := querier.Select(context.Background(), false, nil, matcher)
	samples, err := readSeriesSet(set)
	require.NoError(t, err)
@@ -1359,14 +1359,14 @@ func TestMetricsStalenessOnManagerShutdown(t *testing.T) {
func countStaleNaN(t *testing.T, st storage.Storage) int {
	var c int
-	querier, err := st.Querier(context.Background(), 0, time.Now().Unix()*1000)
+	querier, err := st.Querier(0, time.Now().Unix()*1000)
	require.NoError(t, err)
	defer querier.Close()

	matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "test_2")
	require.NoError(t, err)

-	set := querier.Select(false, nil, matcher)
+	set := querier.Select(context.Background(), false, nil, matcher)
	samples, err := readSeriesSet(set)
	require.NoError(t, err)
@@ -1848,9 +1848,9 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) {
	group.Eval(context.Background(), ts.Add(10*time.Second))

-	q, err := db.Querier(context.Background(), ts.UnixMilli(), ts.Add(20*time.Second).UnixMilli())
+	q, err := db.Querier(ts.UnixMilli(), ts.Add(20*time.Second).UnixMilli())
	require.NoError(t, err)

-	ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "__name__", "sum:histogram_metric"))
+	ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "__name__", "sum:histogram_metric"))
	require.True(t, ss.Next())
	s := ss.At()
	require.False(t, ss.Next())


@@ -2925,9 +2925,9 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
	require.Error(t, err)
	require.NoError(t, slApp.Rollback())

-	q, err := s.Querier(ctx, time.Time{}.UnixNano(), 0)
+	q, err := s.Querier(time.Time{}.UnixNano(), 0)
	require.NoError(t, err)
-	series := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
+	series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
	require.Equal(t, false, series.Next(), "series found in tsdb")
	require.NoError(t, series.Err())
@@ -2937,9 +2937,9 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
	require.NoError(t, err)
	require.NoError(t, slApp.Commit())

-	q, err = s.Querier(ctx, time.Time{}.UnixNano(), 0)
+	q, err = s.Querier(time.Time{}.UnixNano(), 0)
	require.NoError(t, err)
-	series = q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "le", "500"))
+	series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "le", "500"))
	require.Equal(t, true, series.Next(), "series not found in tsdb")
	require.NoError(t, series.Err())
	require.Equal(t, false, series.Next(), "more than one series found in tsdb")
@@ -2984,9 +2984,9 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) {
	require.NoError(t, slApp.Rollback())
	require.Equal(t, errNameLabelMandatory, err)

-	q, err := s.Querier(ctx, time.Time{}.UnixNano(), 0)
+	q, err := s.Querier(time.Time{}.UnixNano(), 0)
	require.NoError(t, err)
-	series := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
+	series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
	require.Equal(t, false, series.Next(), "series found in tsdb")
	require.NoError(t, series.Err())
}
@@ -3346,9 +3346,9 @@ func TestScrapeReportSingleAppender(t *testing.T) {
	start := time.Now()
	for time.Since(start) < 3*time.Second {
-		q, err := s.Querier(ctx, time.Time{}.UnixNano(), time.Now().UnixNano())
+		q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano())
		require.NoError(t, err)
-		series := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".+"))
+		series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".+"))

		c := 0
		for series.Next() {
@@ -3418,10 +3418,10 @@ func TestScrapeReportLimit(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

-	q, err := s.Querier(ctx, time.Time{}.UnixNano(), time.Now().UnixNano())
+	q, err := s.Querier(time.Time{}.UnixNano(), time.Now().UnixNano())
	require.NoError(t, err)
	defer q.Close()
-	series := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "up"))
+	series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "up"))

	var found bool
	for series.Next() {


@@ -18,15 +18,15 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
      - name: install Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
        with:
          go-version: 1.20.x
      - name: Install snmp_exporter/generator dependencies
        run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
        if: github.repository == 'prometheus/snmp_exporter'
      - name: Lint
-        uses: golangci/golangci-lint-action@v3.4.0
+        uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0
        with:
          version: v1.54.2


@@ -42,7 +42,6 @@ func NewBuffer(delta int64) *BufferedSeriesIterator {
// NewBufferIterator returns a new iterator that buffers the values within the
// time range of the current element and the duration of delta before.
func NewBufferIterator(it chunkenc.Iterator, delta int64) *BufferedSeriesIterator {
-	// TODO(codesome): based on encoding, allocate different buffer.
	bit := &BufferedSeriesIterator{
		buf:   newSampleRing(delta, 0, chunkenc.ValNone),
		delta: delta,


@@ -72,15 +72,15 @@ func (f *fanout) StartTime() (int64, error) {
	return firstTime, nil
}

-func (f *fanout) Querier(ctx context.Context, mint, maxt int64) (Querier, error) {
-	primary, err := f.primary.Querier(ctx, mint, maxt)
+func (f *fanout) Querier(mint, maxt int64) (Querier, error) {
+	primary, err := f.primary.Querier(mint, maxt)
	if err != nil {
		return nil, err
	}

	secondaries := make([]Querier, 0, len(f.secondaries))
	for _, storage := range f.secondaries {
-		querier, err := storage.Querier(ctx, mint, maxt)
+		querier, err := storage.Querier(mint, maxt)
		if err != nil {
			// Close already open Queriers, append potential errors to returned error.
			errs := tsdb_errors.NewMulti(err, primary.Close())
@@ -94,15 +94,15 @@ func (f *fanout) Querier(ctx context.Context, mint, maxt int64) (Querier, error)
	return NewMergeQuerier([]Querier{primary}, secondaries, ChainedSeriesMerge), nil
}

-func (f *fanout) ChunkQuerier(ctx context.Context, mint, maxt int64) (ChunkQuerier, error) {
-	primary, err := f.primary.ChunkQuerier(ctx, mint, maxt)
+func (f *fanout) ChunkQuerier(mint, maxt int64) (ChunkQuerier, error) {
+	primary, err := f.primary.ChunkQuerier(mint, maxt)
	if err != nil {
		return nil, err
	}

	secondaries := make([]ChunkQuerier, 0, len(f.secondaries))
	for _, storage := range f.secondaries {
-		querier, err := storage.ChunkQuerier(ctx, mint, maxt)
+		querier, err := storage.ChunkQuerier(mint, maxt)
		if err != nil {
			// Close already open Queriers, append potential errors to returned error.
			errs := tsdb_errors.NewMulti(err, primary.Close())
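The fanout storage follows the same split as the core interfaces: Querier only bounds the time range, and cancellation is handled per Select call. A sketch of the migrated usage with caller-supplied primary and secondary storages:

    package example

    import (
        "context"

        "github.com/prometheus/prometheus/model/labels"
        "github.com/prometheus/prometheus/storage"
    )

    // queryFanout shows the post-change call shape: Querier(mint, maxt)
    // without a context, then Select(ctx, ...) with one.
    func queryFanout(ctx context.Context, primary, secondary storage.Storage) error {
        fan := storage.NewFanout(nil, primary, secondary)

        q, err := fan.Querier(0, 8000)
        if err != nil {
            return err
        }
        defer q.Close()

        matcher := labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "up")
        ss := q.Select(ctx, false, nil, matcher)
        for ss.Next() {
            _ = ss.At() // consume each series
        }
        return ss.Err()
    }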


@@ -24,6 +24,7 @@ import (
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/util/annotations"
	"github.com/prometheus/prometheus/util/teststorage"
)
@@ -75,14 +76,14 @@ func TestFanout_SelectSorted(t *testing.T) {
	fanoutStorage := storage.NewFanout(nil, priStorage, remoteStorage1, remoteStorage2)

	t.Run("querier", func(t *testing.T) {
-		querier, err := fanoutStorage.Querier(context.Background(), 0, 8000)
+		querier, err := fanoutStorage.Querier(0, 8000)
		require.NoError(t, err)
		defer querier.Close()

		matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a")
		require.NoError(t, err)

-		seriesSet := querier.Select(true, nil, matcher)
+		seriesSet := querier.Select(ctx, true, nil, matcher)

		result := make(map[int64]float64)
		var labelsResult labels.Labels
@@ -102,14 +103,14 @@ func TestFanout_SelectSorted(t *testing.T) {
		require.Equal(t, inputTotalSize, len(result))
	})
	t.Run("chunk querier", func(t *testing.T) {
-		querier, err := fanoutStorage.ChunkQuerier(ctx, 0, 8000)
+		querier, err := fanoutStorage.ChunkQuerier(0, 8000)
		require.NoError(t, err)
		defer querier.Close()

		matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a")
		require.NoError(t, err)

-		seriesSet := storage.NewSeriesSetFromChunkSeriesSet(querier.Select(true, nil, matcher))
+		seriesSet := storage.NewSeriesSetFromChunkSeriesSet(querier.Select(ctx, true, nil, matcher))

		result := make(map[int64]float64)
		var labelsResult labels.Labels
@@ -159,12 +160,12 @@ func TestFanoutErrors(t *testing.T) {
		fanoutStorage := storage.NewFanout(nil, tc.primary, tc.secondary)

		t.Run("samples", func(t *testing.T) {
-			querier, err := fanoutStorage.Querier(context.Background(), 0, 8000)
+			querier, err := fanoutStorage.Querier(0, 8000)
			require.NoError(t, err)
			defer querier.Close()

			matcher := labels.MustNewMatcher(labels.MatchEqual, "a", "b")
-			ss := querier.Select(true, nil, matcher)
+			ss := querier.Select(context.Background(), true, nil, matcher)

			// Exhaust.
			for ss.Next() {
@@ -178,18 +179,19 @@ func TestFanoutErrors(t *testing.T) {
			if tc.warning != nil {
				require.Greater(t, len(ss.Warnings()), 0, "warnings expected")
-				require.Error(t, ss.Warnings()[0])
-				require.Equal(t, tc.warning.Error(), ss.Warnings()[0].Error())
+				w := ss.Warnings()
+				require.Error(t, w.AsErrors()[0])
+				require.Equal(t, tc.warning.Error(), w.AsStrings("", 0)[0])
			}
		})
		t.Run("chunks", func(t *testing.T) {
			t.Skip("enable once TestStorage and TSDB implements ChunkQuerier")

-			querier, err := fanoutStorage.ChunkQuerier(context.Background(), 0, 8000)
+			querier, err := fanoutStorage.ChunkQuerier(0, 8000)
			require.NoError(t, err)
			defer querier.Close()

			matcher := labels.MustNewMatcher(labels.MatchEqual, "a", "b")
-			ss := querier.Select(true, nil, matcher)
+			ss := querier.Select(context.Background(), true, nil, matcher)

			// Exhaust.
			for ss.Next() {
@@ -203,8 +205,9 @@ func TestFanoutErrors(t *testing.T) {
			if tc.warning != nil {
				require.Greater(t, len(ss.Warnings()), 0, "warnings expected")
-				require.Error(t, ss.Warnings()[0])
-				require.Equal(t, tc.warning.Error(), ss.Warnings()[0].Error())
+				w := ss.Warnings()
+				require.Error(t, w.AsErrors()[0])
+				require.Equal(t, tc.warning.Error(), w.AsStrings("", 0)[0])
			}
		})
	}
@@ -216,33 +219,33 @@ type errStorage struct{}
type errQuerier struct{}

-func (errStorage) Querier(_ context.Context, _, _ int64) (storage.Querier, error) {
+func (errStorage) Querier(_, _ int64) (storage.Querier, error) {
	return errQuerier{}, nil
}

type errChunkQuerier struct{ errQuerier }

-func (errStorage) ChunkQuerier(_ context.Context, _, _ int64) (storage.ChunkQuerier, error) {
+func (errStorage) ChunkQuerier(_, _ int64) (storage.ChunkQuerier, error) {
	return errChunkQuerier{}, nil
}
func (errStorage) Appender(_ context.Context) storage.Appender { return nil }
func (errStorage) StartTime() (int64, error) { return 0, nil }
func (errStorage) Close() error { return nil }

-func (errQuerier) Select(bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet {
+func (errQuerier) Select(context.Context, bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet {
	return storage.ErrSeriesSet(errSelect)
}

-func (errQuerier) LabelValues(string, ...*labels.Matcher) ([]string, storage.Warnings, error) {
+func (errQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	return nil, nil, errors.New("label values error")
}

-func (errQuerier) LabelNames(...*labels.Matcher) ([]string, storage.Warnings, error) {
+func (errQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	return nil, nil, errors.New("label names error")
}
func (errQuerier) Close() error { return nil }

-func (errChunkQuerier) Select(bool, *storage.SelectHints, ...*labels.Matcher) storage.ChunkSeriesSet {
+func (errChunkQuerier) Select(context.Context, bool, *storage.SelectHints, ...*labels.Matcher) storage.ChunkSeriesSet {
	return storage.ErrChunkSeriesSet(errSelect)
}


@@ -17,19 +17,22 @@
package storage

import (
+	"context"
+
	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/util/annotations"
)

type genericQuerier interface {
	LabelQuerier

-	Select(bool, *SelectHints, ...*labels.Matcher) genericSeriesSet
+	Select(context.Context, bool, *SelectHints, ...*labels.Matcher) genericSeriesSet
}

type genericSeriesSet interface {
	Next() bool
	At() Labels
	Err() error
-	Warnings() Warnings
+	Warnings() annotations.Annotations
}

type genericSeriesMergeFunc func(...Labels) Labels
@@ -58,11 +61,11 @@ type genericQuerierAdapter struct {
	cq ChunkQuerier
}

-func (q *genericQuerierAdapter) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
+func (q *genericQuerierAdapter) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
	if q.q != nil {
-		return &genericSeriesSetAdapter{q.q.Select(sortSeries, hints, matchers...)}
+		return &genericSeriesSetAdapter{q.q.Select(ctx, sortSeries, hints, matchers...)}
	}
-	return &genericChunkSeriesSetAdapter{q.cq.Select(sortSeries, hints, matchers...)}
+	return &genericChunkSeriesSetAdapter{q.cq.Select(ctx, sortSeries, hints, matchers...)}
}

func newGenericQuerierFrom(q Querier) genericQuerier {
@@ -85,8 +88,8 @@ func (a *seriesSetAdapter) At() Series {
	return a.genericSeriesSet.At().(Series)
}

-func (q *querierAdapter) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet {
-	return &seriesSetAdapter{q.genericQuerier.Select(sortSeries, hints, matchers...)}
+func (q *querierAdapter) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet {
+	return &seriesSetAdapter{q.genericQuerier.Select(ctx, sortSeries, hints, matchers...)}
}

type chunkQuerierAdapter struct {
@@ -101,8 +104,8 @@ func (a *chunkSeriesSetAdapter) At() ChunkSeries {
	return a.genericSeriesSet.At().(ChunkSeries)
}

-func (q *chunkQuerierAdapter) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) ChunkSeriesSet {
-	return &chunkSeriesSetAdapter{q.genericQuerier.Select(sortSeries, hints, matchers...)}
+func (q *chunkQuerierAdapter) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) ChunkSeriesSet {
+	return &chunkSeriesSetAdapter{q.genericQuerier.Select(ctx, sortSeries, hints, matchers...)}
}

type seriesMergerAdapter struct {
@@ -137,4 +140,4 @@ func (noopGenericSeriesSet) At() Labels { return nil }

func (noopGenericSeriesSet) Err() error { return nil }

-func (noopGenericSeriesSet) Warnings() Warnings { return nil }
+func (noopGenericSeriesSet) Warnings() annotations.Annotations { return nil }


@@ -24,6 +24,7 @@ import (
	"github.com/prometheus/prometheus/model/metadata"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
+	"github.com/prometheus/prometheus/util/annotations"
)

// The errors exposed.
@@ -91,7 +92,7 @@ type ExemplarStorage interface {
// Use it when you need to have access to all samples without chunk encoding abstraction e.g promQL.
type Queryable interface {
	// Querier returns a new Querier on the storage.
-	Querier(ctx context.Context, mint, maxt int64) (Querier, error)
+	Querier(mint, maxt int64) (Querier, error)
}

// A MockQueryable is used for testing purposes so that a mock Querier can be used.
@@ -99,7 +100,7 @@ type MockQueryable struct {
	MockQuerier Querier
}

-func (q *MockQueryable) Querier(context.Context, int64, int64) (Querier, error) {
+func (q *MockQueryable) Querier(int64, int64) (Querier, error) {
	return q.MockQuerier, nil
}
@@ -110,7 +111,7 @@ type Querier interface {
	// Select returns a set of series that matches the given label matchers.
	// Caller can specify if it requires returned series to be sorted. Prefer not requiring sorting for better performance.
	// It allows passing hints that can help in optimising select, but it's up to implementation how this is used if used at all.
-	Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet
+	Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet
}

// MockQuerier is used for test purposes to mock the selected series that is returned.
@@ -118,11 +119,11 @@ type MockQuerier struct {
	SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet
}

-func (q *MockQuerier) LabelValues(string, ...*labels.Matcher) ([]string, Warnings, error) {
+func (q *MockQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	return nil, nil, nil
}

-func (q *MockQuerier) LabelNames(...*labels.Matcher) ([]string, Warnings, error) {
+func (q *MockQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	return nil, nil, nil
}
@@ -130,7 +131,7 @@ func (q *MockQuerier) Close() error {
	return nil
}

-func (q *MockQuerier) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet {
+func (q *MockQuerier) Select(_ context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet {
	return q.SelectMockFunction(sortSeries, hints, matchers...)
}
@@ -138,7 +139,7 @@ func (q *MockQuerier) Select(sortSeries bool, hints *SelectHints, matchers ...*l
// Use it when you need to have access to samples in encoded format.
type ChunkQueryable interface {
	// ChunkQuerier returns a new ChunkQuerier on the storage.
-	ChunkQuerier(ctx context.Context, mint, maxt int64) (ChunkQuerier, error)
+	ChunkQuerier(mint, maxt int64) (ChunkQuerier, error)
}

// ChunkQuerier provides querying access over time series data of a fixed time range.
@@ -148,7 +149,7 @@ type ChunkQuerier interface {
	// Select returns a set of series that matches the given label matchers.
	// Caller can specify if it requires returned series to be sorted. Prefer not requiring sorting for better performance.
	// It allows passing hints that can help in optimising select, but it's up to implementation how this is used if used at all.
-	Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) ChunkSeriesSet
+	Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) ChunkSeriesSet
}

// LabelQuerier provides querying access over labels.
@@ -157,12 +158,12 @@ type LabelQuerier interface {
	// It is not safe to use the strings beyond the lifetime of the querier.
	// If matchers are specified the returned result set is reduced
	// to label values of metrics matching the matchers.
-	LabelValues(name string, matchers ...*labels.Matcher) ([]string, Warnings, error)
+	LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)

	// LabelNames returns all the unique label names present in the block in sorted order.
	// If matchers are specified the returned result set is reduced
	// to label names of metrics matching the matchers.
-	LabelNames(matchers ...*labels.Matcher) ([]string, Warnings, error)
+	LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)

	// Close releases the resources of the Querier.
	Close() error
@@ -205,11 +206,11 @@ type SelectHints struct {
// TODO(bwplotka): Move to promql/engine_test.go?
// QueryableFunc is an adapter to allow the use of ordinary functions as
// Queryables. It follows the idea of http.HandlerFunc.
-type QueryableFunc func(ctx context.Context, mint, maxt int64) (Querier, error)
+type QueryableFunc func(mint, maxt int64) (Querier, error)

// Querier calls f() with the given parameters.
-func (f QueryableFunc) Querier(ctx context.Context, mint, maxt int64) (Querier, error) {
-	return f(ctx, mint, maxt)
+func (f QueryableFunc) Querier(mint, maxt int64) (Querier, error) {
+	return f(mint, maxt)
}

// Appender provides batched appends against a storage.
@@ -310,7 +311,7 @@ type SeriesSet interface {
	Err() error
	// A collection of warnings for the whole set.
	// Warnings could be return even iteration has not failed with error.
-	Warnings() Warnings
+	Warnings() annotations.Annotations
}

var emptySeriesSet = errSeriesSet{}
@@ -324,10 +325,10 @@ type testSeriesSet struct {
	series Series
}

func (s testSeriesSet) Next() bool { return true }
func (s testSeriesSet) At() Series { return s.series }
func (s testSeriesSet) Err() error { return nil }
-func (s testSeriesSet) Warnings() Warnings { return nil }
+func (s testSeriesSet) Warnings() annotations.Annotations { return nil }

// TestSeriesSet returns a mock series set
func TestSeriesSet(series Series) SeriesSet {
@@ -338,10 +339,10 @@ type errSeriesSet struct {
	err error
}

func (s errSeriesSet) Next() bool { return false }
func (s errSeriesSet) At() Series { return nil }
func (s errSeriesSet) Err() error { return s.err }
-func (s errSeriesSet) Warnings() Warnings { return nil }
+func (s errSeriesSet) Warnings() annotations.Annotations { return nil }

// ErrSeriesSet returns a series set that wraps an error.
func ErrSeriesSet(err error) SeriesSet {
@@ -359,10 +360,10 @@ type errChunkSeriesSet struct {
	err error
}

func (s errChunkSeriesSet) Next() bool { return false }
func (s errChunkSeriesSet) At() ChunkSeries { return nil }
func (s errChunkSeriesSet) Err() error { return s.err }
-func (s errChunkSeriesSet) Warnings() Warnings { return nil }
+func (s errChunkSeriesSet) Warnings() annotations.Annotations { return nil }

// ErrChunkSeriesSet returns a chunk series set that wraps an error.
func ErrChunkSeriesSet(err error) ChunkSeriesSet {
@@ -408,7 +409,7 @@ type ChunkSeriesSet interface {
	Err() error
	// A collection of warnings for the whole set.
	// Warnings could be return even iteration has not failed with error.
-	Warnings() Warnings
+	Warnings() annotations.Annotations
}

// ChunkSeries exposes a single time series and allows iterating over chunks.
@@ -442,5 +443,3 @@ type ChunkIterable interface {
	// chunks of the series, sorted by min time.
	Iterator(chunks.Iterator) chunks.Iterator
}
-
-type Warnings []error
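The end state of the storage interfaces: Queryable.Querier and ChunkQueryable.ChunkQuerier only bound the time range, every query method takes a context, and storage.Warnings is gone in favour of annotations.Annotations. A sketch of a caller written against the new shape:

    package example

    import (
        "context"

        "github.com/prometheus/prometheus/model/labels"
        "github.com/prometheus/prometheus/storage"
    )

    // labelValuesAndSeries exercises the new signatures: no context when
    // building the querier, a context on every query method.
    func labelValuesAndSeries(ctx context.Context, db storage.Queryable, mint, maxt int64) ([]string, error) {
        q, err := db.Querier(mint, maxt)
        if err != nil {
            return nil, err
        }
        defer q.Close()

        // LabelValues now returns annotations.Annotations (the replacement
        // for storage.Warnings) alongside the values.
        vals, annos, err := q.LabelValues(ctx, "job")
        if err != nil {
            return nil, err
        }
        _ = annos // surface or log warnings as appropriate

        ss := q.Select(ctx, true, nil, labels.MustNewMatcher(labels.MatchRegexp, labels.MetricName, ".+"))
        for ss.Next() {
            _ = ss.At()
        }
        return vals, ss.Err()
    }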


@@ -13,6 +13,10 @@
package storage

+import (
+	"github.com/prometheus/prometheus/util/annotations"
+)
+
// lazyGenericSeriesSet is a wrapped series set that is initialised on first call to Next().
type lazyGenericSeriesSet struct {
	init func() (genericSeriesSet, bool)
@@ -43,25 +47,25 @@ func (c *lazyGenericSeriesSet) At() Labels {
	return nil
}

-func (c *lazyGenericSeriesSet) Warnings() Warnings {
+func (c *lazyGenericSeriesSet) Warnings() annotations.Annotations {
	if c.set != nil {
		return c.set.Warnings()
	}
	return nil
}

-type warningsOnlySeriesSet Warnings
+type warningsOnlySeriesSet annotations.Annotations

func (warningsOnlySeriesSet) Next() bool { return false }
func (warningsOnlySeriesSet) Err() error { return nil }
func (warningsOnlySeriesSet) At() Labels { return nil }
-func (c warningsOnlySeriesSet) Warnings() Warnings { return Warnings(c) }
+func (c warningsOnlySeriesSet) Warnings() annotations.Annotations { return annotations.Annotations(c) }

type errorOnlySeriesSet struct {
	err error
}

func (errorOnlySeriesSet) Next() bool { return false }
func (errorOnlySeriesSet) At() Labels { return nil }
func (s errorOnlySeriesSet) Err() error { return s.err }
-func (errorOnlySeriesSet) Warnings() Warnings { return nil }
+func (errorOnlySeriesSet) Warnings() annotations.Annotations { return nil }


@ -16,6 +16,7 @@ package storage
import ( import (
"bytes" "bytes"
"container/heap" "container/heap"
"context"
"fmt" "fmt"
"math" "math"
"sync" "sync"
@ -27,6 +28,7 @@ import (
"github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/chunks"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/util/annotations"
) )
type mergeGenericQuerier struct { type mergeGenericQuerier struct {
@ -97,19 +99,19 @@ func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn Vertica
} }
// Select returns a set of series that matches the given label matchers. // Select returns a set of series that matches the given label matchers.
func (q *mergeGenericQuerier) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet { func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
if len(q.queriers) == 0 { if len(q.queriers) == 0 {
return noopGenericSeriesSet{} return noopGenericSeriesSet{}
} }
if len(q.queriers) == 1 { if len(q.queriers) == 1 {
return q.queriers[0].Select(sortSeries, hints, matchers...) return q.queriers[0].Select(ctx, sortSeries, hints, matchers...)
} }
seriesSets := make([]genericSeriesSet, 0, len(q.queriers)) seriesSets := make([]genericSeriesSet, 0, len(q.queriers))
if !q.concurrentSelect { if !q.concurrentSelect {
for _, querier := range q.queriers { for _, querier := range q.queriers {
// We need to sort for merge to work. // We need to sort for merge to work.
seriesSets = append(seriesSets, querier.Select(true, hints, matchers...)) seriesSets = append(seriesSets, querier.Select(ctx, true, hints, matchers...))
} }
return &lazyGenericSeriesSet{init: func() (genericSeriesSet, bool) { return &lazyGenericSeriesSet{init: func() (genericSeriesSet, bool) {
s := newGenericMergeSeriesSet(seriesSets, q.mergeFn) s := newGenericMergeSeriesSet(seriesSets, q.mergeFn)
@ -128,7 +130,7 @@ func (q *mergeGenericQuerier) Select(sortSeries bool, hints *SelectHints, matche
defer wg.Done() defer wg.Done()
// We need to sort for NewMergeSeriesSet to work. // We need to sort for NewMergeSeriesSet to work.
seriesSetChan <- qr.Select(true, hints, matchers...) seriesSetChan <- qr.Select(ctx, true, hints, matchers...)
}(querier) }(querier)
} }
go func() { go func() {
@ -157,8 +159,8 @@ func (l labelGenericQueriers) SplitByHalf() (labelGenericQueriers, labelGenericQ
// LabelValues returns all potential values for a label name. // LabelValues returns all potential values for a label name.
// If matchers are specified the returned result set is reduced // If matchers are specified the returned result set is reduced
// to label values of metrics matching the matchers. // to label values of metrics matching the matchers.
func (q *mergeGenericQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, Warnings, error) { func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
res, ws, err := q.lvals(q.queriers, name, matchers...) res, ws, err := q.lvals(ctx, q.queriers, name, matchers...)
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("LabelValues() from merge generic querier for label %s: %w", name, err) return nil, nil, fmt.Errorf("LabelValues() from merge generic querier for label %s: %w", name, err)
} }
@ -166,23 +168,23 @@ func (q *mergeGenericQuerier) LabelValues(name string, matchers ...*labels.Match
} }
// lvals performs merge sort for LabelValues from multiple queriers. // lvals performs merge sort for LabelValues from multiple queriers.
func (q *mergeGenericQuerier) lvals(lq labelGenericQueriers, n string, matchers ...*labels.Matcher) ([]string, Warnings, error) { func (q *mergeGenericQuerier) lvals(ctx context.Context, lq labelGenericQueriers, n string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
if lq.Len() == 0 { if lq.Len() == 0 {
return nil, nil, nil return nil, nil, nil
} }
if lq.Len() == 1 { if lq.Len() == 1 {
return lq.Get(0).LabelValues(n, matchers...) return lq.Get(0).LabelValues(ctx, n, matchers...)
} }
a, b := lq.SplitByHalf() a, b := lq.SplitByHalf()
var ws Warnings var ws annotations.Annotations
s1, w, err := q.lvals(a, n, matchers...) s1, w, err := q.lvals(ctx, a, n, matchers...)
ws = append(ws, w...) ws.Merge(w)
if err != nil { if err != nil {
return nil, ws, err return nil, ws, err
} }
s2, ws, err := q.lvals(b, n, matchers...) s2, ws, err := q.lvals(ctx, b, n, matchers...)
ws = append(ws, w...) ws.Merge(w)
if err != nil { if err != nil {
return nil, ws, err return nil, ws, err
} }
@ -217,16 +219,16 @@ func mergeStrings(a, b []string) []string {
} }
// LabelNames returns all the unique label names present in all queriers in sorted order. // LabelNames returns all the unique label names present in all queriers in sorted order.
func (q *mergeGenericQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, Warnings, error) { func (q *mergeGenericQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
var ( var (
labelNamesMap = make(map[string]struct{}) labelNamesMap = make(map[string]struct{})
warnings Warnings warnings annotations.Annotations
) )
for _, querier := range q.queriers { for _, querier := range q.queriers {
names, wrn, err := querier.LabelNames(matchers...) names, wrn, err := querier.LabelNames(ctx, matchers...)
if wrn != nil { if wrn != nil {
// TODO(bwplotka): We could potentially wrap warnings. // TODO(bwplotka): We could potentially wrap warnings.
warnings = append(warnings, wrn...) warnings.Merge(wrn)
} }
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("LabelNames() from merge generic querier: %w", err) return nil, nil, fmt.Errorf("LabelNames() from merge generic querier: %w", err)
@ -381,10 +383,10 @@ func (c *genericMergeSeriesSet) Err() error {
return nil return nil
} }
func (c *genericMergeSeriesSet) Warnings() Warnings { func (c *genericMergeSeriesSet) Warnings() annotations.Annotations {
var ws Warnings var ws annotations.Annotations
for _, set := range c.sets { for _, set := range c.sets {
ws = append(ws, set.Warnings()...) ws.Merge(set.Warnings())
} }
return ws return ws
} }
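At the call site, a merged set's `Warnings()` is now the deduplicated union of its children, built with `Merge` rather than slice appends. A hedged sketch (the queriers and matchers are placeholders):

package example

import (
	"context"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/util/annotations"
)

func mergedWarnings(ctx context.Context, primary, secondary storage.Querier, ms ...*labels.Matcher) (annotations.Annotations, error) {
	merged := storage.NewMergeQuerier([]storage.Querier{primary}, []storage.Querier{secondary}, storage.ChainedSeriesMerge)
	ss := merged.Select(ctx, true, nil, ms...) // sortSeries=true, as merging requires
	for ss.Next() {
		_ = ss.At() // consume the merged series
	}
	// Warnings() is the deduplicated union of every child set's annotations.
	return ss.Warnings(), ss.Err()
}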


@ -14,6 +14,7 @@
package storage package storage
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"math" "math"
@ -27,6 +28,7 @@ import (
"github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/util/annotations"
) )
func TestMergeQuerierWithChainMerger(t *testing.T) { func TestMergeQuerierWithChainMerger(t *testing.T) {
@ -187,7 +189,7 @@ func TestMergeQuerierWithChainMerger(t *testing.T) {
} }
qs = append(qs, tc.extraQueriers...) qs = append(qs, tc.extraQueriers...)
mergedQuerier := NewMergeQuerier([]Querier{p}, qs, ChainedSeriesMerge).Select(false, nil) mergedQuerier := NewMergeQuerier([]Querier{p}, qs, ChainedSeriesMerge).Select(context.Background(), false, nil)
// Get all merged series upfront to make sure there are no incorrectly retained shared // Get all merged series upfront to make sure there are no incorrectly retained shared
// buffers causing bugs. // buffers causing bugs.
@ -363,7 +365,7 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
} }
qs = append(qs, tc.extraQueriers...) qs = append(qs, tc.extraQueriers...)
merged := NewMergeChunkQuerier([]ChunkQuerier{p}, qs, NewCompactingChunkSeriesMerger(nil)).Select(false, nil) merged := NewMergeChunkQuerier([]ChunkQuerier{p}, qs, NewCompactingChunkSeriesMerger(nil)).Select(context.Background(), false, nil)
for merged.Next() { for merged.Next() {
require.True(t, tc.expected.Next(), "Expected Next() to be true") require.True(t, tc.expected.Next(), "Expected Next() to be true")
actualSeries := merged.At() actualSeries := merged.At()
@ -737,7 +739,7 @@ func (a seriesByLabel) Len() int { return len(a) }
func (a seriesByLabel) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a seriesByLabel) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a seriesByLabel) Less(i, j int) bool { return labels.Compare(a[i].Labels(), a[j].Labels()) < 0 } func (a seriesByLabel) Less(i, j int) bool { return labels.Compare(a[i].Labels(), a[j].Labels()) < 0 }
func (m *mockQuerier) Select(sortSeries bool, _ *SelectHints, _ ...*labels.Matcher) SeriesSet { func (m *mockQuerier) Select(_ context.Context, sortSeries bool, _ *SelectHints, _ ...*labels.Matcher) SeriesSet {
cpy := make([]Series, len(m.toReturn)) cpy := make([]Series, len(m.toReturn))
copy(cpy, m.toReturn) copy(cpy, m.toReturn)
if sortSeries { if sortSeries {
@ -761,7 +763,7 @@ func (a chunkSeriesByLabel) Less(i, j int) bool {
return labels.Compare(a[i].Labels(), a[j].Labels()) < 0 return labels.Compare(a[i].Labels(), a[j].Labels()) < 0
} }
func (m *mockChunkQurier) Select(sortSeries bool, _ *SelectHints, _ ...*labels.Matcher) ChunkSeriesSet { func (m *mockChunkQurier) Select(_ context.Context, sortSeries bool, _ *SelectHints, _ ...*labels.Matcher) ChunkSeriesSet {
cpy := make([]ChunkSeries, len(m.toReturn)) cpy := make([]ChunkSeries, len(m.toReturn))
copy(cpy, m.toReturn) copy(cpy, m.toReturn)
if sortSeries { if sortSeries {
@ -792,7 +794,7 @@ func (m *mockSeriesSet) At() Series { return m.series[m.idx] }
func (m *mockSeriesSet) Err() error { return nil } func (m *mockSeriesSet) Err() error { return nil }
func (m *mockSeriesSet) Warnings() Warnings { return nil } func (m *mockSeriesSet) Warnings() annotations.Annotations { return nil }
type mockChunkSeriesSet struct { type mockChunkSeriesSet struct {
idx int idx int
@ -815,7 +817,7 @@ func (m *mockChunkSeriesSet) At() ChunkSeries { return m.series[m.idx] }
func (m *mockChunkSeriesSet) Err() error { return nil } func (m *mockChunkSeriesSet) Err() error { return nil }
func (m *mockChunkSeriesSet) Warnings() Warnings { return nil } func (m *mockChunkSeriesSet) Warnings() annotations.Annotations { return nil }
func TestChainSampleIterator(t *testing.T) { func TestChainSampleIterator(t *testing.T) {
for _, tc := range []struct { for _, tc := range []struct {
@ -989,7 +991,7 @@ type mockGenericQuerier struct {
sortedSeriesRequested []bool sortedSeriesRequested []bool
resp []string resp []string
warnings Warnings warnings annotations.Annotations
err error err error
} }
@ -998,14 +1000,14 @@ type labelNameRequest struct {
matchers []*labels.Matcher matchers []*labels.Matcher
} }
func (m *mockGenericQuerier) Select(b bool, _ *SelectHints, _ ...*labels.Matcher) genericSeriesSet { func (m *mockGenericQuerier) Select(_ context.Context, b bool, _ *SelectHints, _ ...*labels.Matcher) genericSeriesSet {
m.mtx.Lock() m.mtx.Lock()
m.sortedSeriesRequested = append(m.sortedSeriesRequested, b) m.sortedSeriesRequested = append(m.sortedSeriesRequested, b)
m.mtx.Unlock() m.mtx.Unlock()
return &mockGenericSeriesSet{resp: m.resp, warnings: m.warnings, err: m.err} return &mockGenericSeriesSet{resp: m.resp, warnings: m.warnings, err: m.err}
} }
func (m *mockGenericQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, Warnings, error) { func (m *mockGenericQuerier) LabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
m.mtx.Lock() m.mtx.Lock()
m.labelNamesRequested = append(m.labelNamesRequested, labelNameRequest{ m.labelNamesRequested = append(m.labelNamesRequested, labelNameRequest{
name: name, name: name,
@ -1015,7 +1017,7 @@ func (m *mockGenericQuerier) LabelValues(name string, matchers ...*labels.Matche
return m.resp, m.warnings, m.err return m.resp, m.warnings, m.err
} }
func (m *mockGenericQuerier) LabelNames(...*labels.Matcher) ([]string, Warnings, error) { func (m *mockGenericQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
m.mtx.Lock() m.mtx.Lock()
m.labelNamesCalls++ m.labelNamesCalls++
m.mtx.Unlock() m.mtx.Unlock()
@ -1029,7 +1031,7 @@ func (m *mockGenericQuerier) Close() error {
type mockGenericSeriesSet struct { type mockGenericSeriesSet struct {
resp []string resp []string
warnings Warnings warnings annotations.Annotations
err error err error
curr int curr int
@ -1046,8 +1048,8 @@ func (m *mockGenericSeriesSet) Next() bool {
return true return true
} }
func (m *mockGenericSeriesSet) Err() error { return m.err } func (m *mockGenericSeriesSet) Err() error { return m.err }
func (m *mockGenericSeriesSet) Warnings() Warnings { return m.warnings } func (m *mockGenericSeriesSet) Warnings() annotations.Annotations { return m.warnings }
func (m *mockGenericSeriesSet) At() Labels { func (m *mockGenericSeriesSet) At() Labels {
return mockLabels(m.resp[m.curr-1]) return mockLabels(m.resp[m.curr-1])
@ -1074,6 +1076,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
var ( var (
errStorage = errors.New("storage error") errStorage = errors.New("storage error")
warnStorage = errors.New("storage warning") warnStorage = errors.New("storage warning")
ctx = context.Background()
) )
for _, tcase := range []struct { for _, tcase := range []struct {
name string name string
@ -1082,10 +1085,9 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
expectedSelectsSeries []labels.Labels expectedSelectsSeries []labels.Labels
expectedLabels []string expectedLabels []string
expectedWarnings [4]Warnings expectedWarnings annotations.Annotations
expectedErrs [4]error expectedErrs [4]error
}{ }{
{},
{ {
name: "one successful primary querier", name: "one successful primary querier",
queriers: []genericQuerier{&mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}}, queriers: []genericQuerier{&mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}},
@ -1159,31 +1161,21 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
expectedSelectsSeries: []labels.Labels{ expectedSelectsSeries: []labels.Labels{
labels.FromStrings("test", "a"), labels.FromStrings("test", "a"),
}, },
expectedLabels: []string{"a"}, expectedLabels: []string{"a"},
expectedWarnings: [4]Warnings{ expectedWarnings: annotations.New().Add(errStorage),
[]error{errStorage, errStorage},
[]error{errStorage, errStorage},
[]error{errStorage, errStorage},
[]error{errStorage, errStorage},
},
}, },
{ {
name: "successful queriers with warnings", name: "successful queriers with warnings",
queriers: []genericQuerier{ queriers: []genericQuerier{
&mockGenericQuerier{resp: []string{"a"}, warnings: []error{warnStorage}, err: nil}, &mockGenericQuerier{resp: []string{"a"}, warnings: annotations.New().Add(warnStorage), err: nil},
&secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: []error{warnStorage}, err: nil}}, &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: annotations.New().Add(warnStorage), err: nil}},
}, },
expectedSelectsSeries: []labels.Labels{ expectedSelectsSeries: []labels.Labels{
labels.FromStrings("test", "a"), labels.FromStrings("test", "a"),
labels.FromStrings("test", "b"), labels.FromStrings("test", "b"),
}, },
expectedLabels: []string{"a", "b"}, expectedLabels: []string{"a", "b"},
expectedWarnings: [4]Warnings{ expectedWarnings: annotations.New().Add(warnStorage),
[]error{warnStorage, warnStorage},
[]error{warnStorage, warnStorage},
[]error{warnStorage, warnStorage},
[]error{warnStorage, warnStorage},
},
}, },
} { } {
t.Run(tcase.name, func(t *testing.T) { t.Run(tcase.name, func(t *testing.T) {
@ -1193,12 +1185,12 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
} }
t.Run("Select", func(t *testing.T) { t.Run("Select", func(t *testing.T) {
res := q.Select(false, nil) res := q.Select(context.Background(), false, nil)
var lbls []labels.Labels var lbls []labels.Labels
for res.Next() { for res.Next() {
lbls = append(lbls, res.At().Labels()) lbls = append(lbls, res.At().Labels())
} }
require.Equal(t, tcase.expectedWarnings[0], res.Warnings()) require.Subset(t, tcase.expectedWarnings, res.Warnings())
require.Equal(t, tcase.expectedErrs[0], res.Err()) require.Equal(t, tcase.expectedErrs[0], res.Err())
require.True(t, errors.Is(res.Err(), tcase.expectedErrs[0]), "expected error doesn't match") require.True(t, errors.Is(res.Err(), tcase.expectedErrs[0]), "expected error doesn't match")
require.Equal(t, tcase.expectedSelectsSeries, lbls) require.Equal(t, tcase.expectedSelectsSeries, lbls)
@ -1214,8 +1206,8 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
} }
}) })
t.Run("LabelNames", func(t *testing.T) { t.Run("LabelNames", func(t *testing.T) {
res, w, err := q.LabelNames() res, w, err := q.LabelNames(ctx)
require.Equal(t, tcase.expectedWarnings[1], w) require.Subset(t, tcase.expectedWarnings, w)
require.True(t, errors.Is(err, tcase.expectedErrs[1]), "expected error doesn't match") require.True(t, errors.Is(err, tcase.expectedErrs[1]), "expected error doesn't match")
require.Equal(t, tcase.expectedLabels, res) require.Equal(t, tcase.expectedLabels, res)
@ -1229,8 +1221,8 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
} }
}) })
t.Run("LabelValues", func(t *testing.T) { t.Run("LabelValues", func(t *testing.T) {
res, w, err := q.LabelValues("test") res, w, err := q.LabelValues(ctx, "test")
require.Equal(t, tcase.expectedWarnings[2], w) require.Subset(t, tcase.expectedWarnings, w)
require.True(t, errors.Is(err, tcase.expectedErrs[2]), "expected error doesn't match") require.True(t, errors.Is(err, tcase.expectedErrs[2]), "expected error doesn't match")
require.Equal(t, tcase.expectedLabels, res) require.Equal(t, tcase.expectedLabels, res)
@ -1245,8 +1237,8 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
}) })
t.Run("LabelValuesWithMatchers", func(t *testing.T) { t.Run("LabelValuesWithMatchers", func(t *testing.T) {
matcher := labels.MustNewMatcher(labels.MatchEqual, "otherLabel", "someValue") matcher := labels.MustNewMatcher(labels.MatchEqual, "otherLabel", "someValue")
res, w, err := q.LabelValues("test2", matcher) res, w, err := q.LabelValues(ctx, "test2", matcher)
require.Equal(t, tcase.expectedWarnings[3], w) require.Subset(t, tcase.expectedWarnings, w)
require.True(t, errors.Is(err, tcase.expectedErrs[3]), "expected error doesn't match") require.True(t, errors.Is(err, tcase.expectedErrs[3]), "expected error doesn't match")
require.Equal(t, tcase.expectedLabels, res) require.Equal(t, tcase.expectedLabels, res)
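The switch from `require.Equal` on `[4]Warnings` to `require.Subset` follows from the container change: `annotations.Annotations` is keyed by message, so the same warning reported by several queriers collapses into one entry instead of repeating. A small demonstration (hedged, not taken from the test file):

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/util/annotations"
)

func main() {
	warn := errors.New("storage warning")

	var ws annotations.Annotations
	ws.Add(warn)
	ws.Add(warn) // same message: overwritten, not appended

	// Prints 1, where the old []error-based Warnings would have held two entries.
	fmt.Println(len(ws))
}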


@ -14,7 +14,10 @@
package storage package storage
import ( import (
"context"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/util/annotations"
) )
type noopQuerier struct{} type noopQuerier struct{}
@ -24,15 +27,15 @@ func NoopQuerier() Querier {
return noopQuerier{} return noopQuerier{}
} }
func (noopQuerier) Select(bool, *SelectHints, ...*labels.Matcher) SeriesSet { func (noopQuerier) Select(context.Context, bool, *SelectHints, ...*labels.Matcher) SeriesSet {
return NoopSeriesSet() return NoopSeriesSet()
} }
func (noopQuerier) LabelValues(string, ...*labels.Matcher) ([]string, Warnings, error) { func (noopQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil return nil, nil, nil
} }
func (noopQuerier) LabelNames(...*labels.Matcher) ([]string, Warnings, error) { func (noopQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil return nil, nil, nil
} }
@ -47,15 +50,15 @@ func NoopChunkedQuerier() ChunkQuerier {
return noopChunkQuerier{} return noopChunkQuerier{}
} }
func (noopChunkQuerier) Select(bool, *SelectHints, ...*labels.Matcher) ChunkSeriesSet { func (noopChunkQuerier) Select(context.Context, bool, *SelectHints, ...*labels.Matcher) ChunkSeriesSet {
return NoopChunkedSeriesSet() return NoopChunkedSeriesSet()
} }
func (noopChunkQuerier) LabelValues(string, ...*labels.Matcher) ([]string, Warnings, error) { func (noopChunkQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil return nil, nil, nil
} }
func (noopChunkQuerier) LabelNames(...*labels.Matcher) ([]string, Warnings, error) { func (noopChunkQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil return nil, nil, nil
} }
@ -76,7 +79,7 @@ func (noopSeriesSet) At() Series { return nil }
func (noopSeriesSet) Err() error { return nil } func (noopSeriesSet) Err() error { return nil }
func (noopSeriesSet) Warnings() Warnings { return nil } func (noopSeriesSet) Warnings() annotations.Annotations { return nil }
type noopChunkedSeriesSet struct{} type noopChunkedSeriesSet struct{}
@ -91,4 +94,4 @@ func (noopChunkedSeriesSet) At() ChunkSeries { return nil }
func (noopChunkedSeriesSet) Err() error { return nil } func (noopChunkedSeriesSet) Err() error { return nil }
func (noopChunkedSeriesSet) Warnings() Warnings { return nil } func (noopChunkedSeriesSet) Warnings() annotations.Annotations { return nil }
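Taken together, these no-op methods spell out the full post-change `storage.Querier` surface. A minimal sketch of a custom implementation against the new signatures (the cancellation check is illustrative, not required by the interface):

package example

import (
	"context"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/util/annotations"
)

// emptyQuerier satisfies storage.Querier with the context-threaded methods.
type emptyQuerier struct{}

func (emptyQuerier) Select(ctx context.Context, _ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet {
	if err := ctx.Err(); err != nil {
		return storage.ErrSeriesSet(err) // honour per-call cancellation
	}
	return storage.NoopSeriesSet()
}

func (emptyQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	return nil, nil, nil
}

func (emptyQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	return nil, nil, nil
}

func (emptyQuerier) Close() error { return nil }

var _ storage.Querier = emptyQuerier{}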


@ -38,6 +38,7 @@ import (
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/util/annotations"
) )
const ( const (
@ -122,7 +123,7 @@ func ToQuery(from, to int64, matchers []*labels.Matcher, hints *storage.SelectHi
} }
// ToQueryResult builds a QueryResult proto. // ToQueryResult builds a QueryResult proto.
func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult, storage.Warnings, error) { func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult, annotations.Annotations, error) {
numSamples := 0 numSamples := 0
resp := &prompb.QueryResult{} resp := &prompb.QueryResult{}
var iter chunkenc.Iterator var iter chunkenc.Iterator
@ -224,7 +225,7 @@ func StreamChunkedReadResponses(
sortedExternalLabels []prompb.Label, sortedExternalLabels []prompb.Label,
maxBytesInFrame int, maxBytesInFrame int,
marshalPool *sync.Pool, marshalPool *sync.Pool,
) (storage.Warnings, error) { ) (annotations.Annotations, error) {
var ( var (
chks []prompb.Chunk chks []prompb.Chunk
lbls []prompb.Label lbls []prompb.Label
@ -340,7 +341,7 @@ func (e errSeriesSet) Err() error {
return e.err return e.err
} }
func (e errSeriesSet) Warnings() storage.Warnings { return nil } func (e errSeriesSet) Warnings() annotations.Annotations { return nil }
// concreteSeriesSet implements storage.SeriesSet. // concreteSeriesSet implements storage.SeriesSet.
type concreteSeriesSet struct { type concreteSeriesSet struct {
@ -361,7 +362,7 @@ func (c *concreteSeriesSet) Err() error {
return nil return nil
} }
func (c *concreteSeriesSet) Warnings() storage.Warnings { return nil } func (c *concreteSeriesSet) Warnings() annotations.Annotations { return nil }
// concreteSeries implements storage.Series. // concreteSeries implements storage.Series.
type concreteSeries struct { type concreteSeries struct {
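`ToQueryResult` now returns `annotations.Annotations` in place of `storage.Warnings`; a caller that previously ranged over the `[]error` can flatten the new type. A hedged caller sketch (function and variable names assumed):

package example

import (
	"log"

	"github.com/prometheus/prometheus/prompb"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/storage/remote"
)

func buildResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult, error) {
	res, ws, err := remote.ToQueryResult(ss, sampleLimit)
	if err != nil {
		return nil, err
	}
	// Annotations are advisory: log them, but still return the result.
	for _, w := range ws.AsErrors() {
		log.Println("remote read warning:", w)
	}
	return res, nil
}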


@ -30,6 +30,7 @@ import (
"github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/util/annotations"
) )
var testHistogram = histogram.Histogram{ var testHistogram = histogram.Histogram{
@ -810,7 +811,7 @@ func (c *mockChunkSeriesSet) At() storage.ChunkSeries {
} }
} }
func (c *mockChunkSeriesSet) Warnings() storage.Warnings { return nil } func (c *mockChunkSeriesSet) Warnings() annotations.Annotations { return nil }
func (c *mockChunkSeriesSet) Err() error { func (c *mockChunkSeriesSet) Err() error {
return nil return nil


@ -20,6 +20,7 @@ import (
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/annotations"
) )
type sampleAndChunkQueryableClient struct { type sampleAndChunkQueryableClient struct {
@ -48,9 +49,8 @@ func NewSampleAndChunkQueryableClient(
} }
} }
func (c *sampleAndChunkQueryableClient) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { func (c *sampleAndChunkQueryableClient) Querier(mint, maxt int64) (storage.Querier, error) {
q := &querier{ q := &querier{
ctx: ctx,
mint: mint, mint: mint,
maxt: maxt, maxt: maxt,
client: c.client, client: c.client,
@ -75,10 +75,9 @@ func (c *sampleAndChunkQueryableClient) Querier(ctx context.Context, mint, maxt
return q, nil return q, nil
} }
func (c *sampleAndChunkQueryableClient) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { func (c *sampleAndChunkQueryableClient) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) {
cq := &chunkQuerier{ cq := &chunkQuerier{
querier: querier{ querier: querier{
ctx: ctx,
mint: mint, mint: mint,
maxt: maxt, maxt: maxt,
client: c.client, client: c.client,
@ -125,7 +124,6 @@ func (c *sampleAndChunkQueryableClient) preferLocalStorage(mint, maxt int64) (cm
} }
type querier struct { type querier struct {
ctx context.Context
mint, maxt int64 mint, maxt int64
client ReadClient client ReadClient
@ -140,7 +138,7 @@ type querier struct {
// //
// If requiredMatchers are given, select returns a NoopSeriesSet if the given matchers don't match the label set of the // If requiredMatchers are given, select returns a NoopSeriesSet if the given matchers don't match the label set of the
// requiredMatchers. Otherwise it'll just call the remote endpoint. // requiredMatchers. Otherwise it'll just call the remote endpoint.
func (q *querier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { func (q *querier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
if len(q.requiredMatchers) > 0 { if len(q.requiredMatchers) > 0 {
// Copy to not modify slice configured by user. // Copy to not modify slice configured by user.
requiredMatchers := append([]*labels.Matcher{}, q.requiredMatchers...) requiredMatchers := append([]*labels.Matcher{}, q.requiredMatchers...)
@ -167,7 +165,7 @@ func (q *querier) Select(sortSeries bool, hints *storage.SelectHints, matchers .
return storage.ErrSeriesSet(fmt.Errorf("toQuery: %w", err)) return storage.ErrSeriesSet(fmt.Errorf("toQuery: %w", err))
} }
res, err := q.client.Read(q.ctx, query) res, err := q.client.Read(ctx, query)
if err != nil { if err != nil {
return storage.ErrSeriesSet(fmt.Errorf("remote_read: %w", err)) return storage.ErrSeriesSet(fmt.Errorf("remote_read: %w", err))
} }
@ -212,13 +210,13 @@ func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, []s
} }
// LabelValues implements storage.Querier and is a noop. // LabelValues implements storage.Querier and is a noop.
func (q *querier) LabelValues(string, ...*labels.Matcher) ([]string, storage.Warnings, error) { func (q *querier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
// TODO: Implement: https://github.com/prometheus/prometheus/issues/3351 // TODO: Implement: https://github.com/prometheus/prometheus/issues/3351
return nil, nil, errors.New("not implemented") return nil, nil, errors.New("not implemented")
} }
// LabelNames implements storage.Querier and is a noop. // LabelNames implements storage.Querier and is a noop.
func (q *querier) LabelNames(...*labels.Matcher) ([]string, storage.Warnings, error) { func (q *querier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
// TODO: Implement: https://github.com/prometheus/prometheus/issues/3351 // TODO: Implement: https://github.com/prometheus/prometheus/issues/3351
return nil, nil, errors.New("not implemented") return nil, nil, errors.New("not implemented")
} }
@ -235,9 +233,9 @@ type chunkQuerier struct {
// Select implements storage.ChunkQuerier and uses the given matchers to read chunk series sets from the client. // Select implements storage.ChunkQuerier and uses the given matchers to read chunk series sets from the client.
// It uses remote.querier.Select so it supports external labels and required matchers if specified. // It uses remote.querier.Select so it supports external labels and required matchers if specified.
func (q *chunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet { func (q *chunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet {
// TODO(bwplotka) Support remote read chunked and allow returning chunks directly (TODO ticket). // TODO(bwplotka) Support remote read chunked and allow returning chunks directly (TODO ticket).
return storage.NewSeriesSetToChunkSet(q.querier.Select(sortSeries, hints, matchers...)) return storage.NewSeriesSetToChunkSet(q.querier.Select(ctx, sortSeries, hints, matchers...))
} }
// Note strings in toFilter must be sorted. // Note strings in toFilter must be sorted.
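With the stored `ctx` field gone from `querier`, the context now rides along on each call, matching the usual Go advice against keeping contexts in structs. A sketch of the caller-side shape under the new split (the queryable and matchers are placeholders):

package example

import (
	"context"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

func readOnce(ctx context.Context, queryable storage.Queryable, mint, maxt int64, ms ...*labels.Matcher) error {
	q, err := queryable.Querier(mint, maxt) // construction no longer takes a context
	if err != nil {
		return err
	}
	defer q.Close()

	ss := q.Select(ctx, false, nil, ms...) // the context scopes this call only
	for ss.Next() {
		_ = ss.At()
	}
	return ss.Err()
}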


@ -27,6 +27,7 @@ import (
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/annotations"
"github.com/prometheus/prometheus/util/gate" "github.com/prometheus/prometheus/util/gate"
) )
@ -131,7 +132,7 @@ func (h *readHandler) remoteReadSamples(
return err return err
} }
querier, err := h.queryable.Querier(ctx, query.StartTimestampMs, query.EndTimestampMs) querier, err := h.queryable.Querier(query.StartTimestampMs, query.EndTimestampMs)
if err != nil { if err != nil {
return err return err
} }
@ -154,8 +155,8 @@ func (h *readHandler) remoteReadSamples(
} }
} }
var ws storage.Warnings var ws annotations.Annotations
resp.Results[i], ws, err = ToQueryResult(querier.Select(false, hints, filteredMatchers...), h.remoteReadSampleLimit) resp.Results[i], ws, err = ToQueryResult(querier.Select(ctx, false, hints, filteredMatchers...), h.remoteReadSampleLimit)
if err != nil { if err != nil {
return err return err
} }
@ -198,7 +199,7 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re
return err return err
} }
querier, err := h.queryable.ChunkQuerier(ctx, query.StartTimestampMs, query.EndTimestampMs) querier, err := h.queryable.ChunkQuerier(query.StartTimestampMs, query.EndTimestampMs)
if err != nil { if err != nil {
return err return err
} }
@ -225,7 +226,7 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re
NewChunkedWriter(w, f), NewChunkedWriter(w, f),
int64(i), int64(i),
// The streaming API has to provide the series sorted. // The streaming API has to provide the series sorted.
querier.Select(true, hints, filteredMatchers...), querier.Select(ctx, true, hints, filteredMatchers...),
sortedExternalLabels, sortedExternalLabels,
h.remoteReadMaxBytesInFrame, h.remoteReadMaxBytesInFrame,
h.marshalPool, h.marshalPool,


@ -27,7 +27,7 @@ import (
"github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/annotations"
) )
func TestNoDuplicateReadConfigs(t *testing.T) { func TestNoDuplicateReadConfigs(t *testing.T) {
@ -469,13 +469,13 @@ func TestSampleAndChunkQueryableClient(t *testing.T) {
tc.readRecent, tc.readRecent,
tc.callback, tc.callback,
) )
q, err := c.Querier(context.TODO(), tc.mint, tc.maxt) q, err := c.Querier(tc.mint, tc.maxt)
require.NoError(t, err) require.NoError(t, err)
defer require.NoError(t, q.Close()) defer require.NoError(t, q.Close())
ss := q.Select(true, nil, tc.matchers...) ss := q.Select(context.Background(), true, nil, tc.matchers...)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, storage.Warnings(nil), ss.Warnings()) require.Equal(t, annotations.Annotations(nil), ss.Warnings())
require.Equal(t, tc.expectedQuery, m.got) require.Equal(t, tc.expectedQuery, m.got)


@ -152,14 +152,14 @@ func (s *Storage) StartTime() (int64, error) {
// Returned querier will never return an error, as all queryables are assumed best effort. // Returned querier will never return an error, as all queryables are assumed best effort.
// Additionally, all returned queriers ensure that their Select's SeriesSets have ready data after the first `Next` invocation. // Additionally, all returned queriers ensure that their Select's SeriesSets have ready data after the first `Next` invocation.
// This is because Prometheus (fanout and secondary queries) can't handle the stream failing half way through by design. // This is because Prometheus (fanout and secondary queries) can't handle the stream failing half way through by design.
func (s *Storage) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { func (s *Storage) Querier(mint, maxt int64) (storage.Querier, error) {
s.mtx.Lock() s.mtx.Lock()
queryables := s.queryables queryables := s.queryables
s.mtx.Unlock() s.mtx.Unlock()
queriers := make([]storage.Querier, 0, len(queryables)) queriers := make([]storage.Querier, 0, len(queryables))
for _, queryable := range queryables { for _, queryable := range queryables {
q, err := queryable.Querier(ctx, mint, maxt) q, err := queryable.Querier(mint, maxt)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -170,14 +170,14 @@ func (s *Storage) Querier(ctx context.Context, mint, maxt int64) (storage.Querie
// ChunkQuerier returns a storage.MergeQuerier combining the remote client queriers // ChunkQuerier returns a storage.MergeQuerier combining the remote client queriers
// of each configured remote read endpoint. // of each configured remote read endpoint.
func (s *Storage) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { func (s *Storage) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) {
s.mtx.Lock() s.mtx.Lock()
queryables := s.queryables queryables := s.queryables
s.mtx.Unlock() s.mtx.Unlock()
queriers := make([]storage.ChunkQuerier, 0, len(queryables)) queriers := make([]storage.ChunkQuerier, 0, len(queryables))
for _, queryable := range queryables { for _, queryable := range queryables {
q, err := queryable.ChunkQuerier(ctx, mint, maxt) q, err := queryable.ChunkQuerier(mint, maxt)
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -14,9 +14,11 @@
package storage package storage
import ( import (
"context"
"sync" "sync"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/util/annotations"
) )
// secondaryQuerier is a wrapper that allows a querier to be treated in a best effort manner. // secondaryQuerier is a wrapper that allows a querier to be treated in a best effort manner.
@ -47,28 +49,28 @@ func newSecondaryQuerierFromChunk(cq ChunkQuerier) genericQuerier {
return &secondaryQuerier{genericQuerier: newGenericQuerierFromChunk(cq)} return &secondaryQuerier{genericQuerier: newGenericQuerierFromChunk(cq)}
} }
func (s *secondaryQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, Warnings, error) { func (s *secondaryQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
vals, w, err := s.genericQuerier.LabelValues(name, matchers...) vals, w, err := s.genericQuerier.LabelValues(ctx, name, matchers...)
if err != nil { if err != nil {
return nil, append([]error{err}, w...), nil return nil, w.Add(err), nil
} }
return vals, w, nil return vals, w, nil
} }
func (s *secondaryQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, Warnings, error) { func (s *secondaryQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
names, w, err := s.genericQuerier.LabelNames(matchers...) names, w, err := s.genericQuerier.LabelNames(ctx, matchers...)
if err != nil { if err != nil {
return nil, append([]error{err}, w...), nil return nil, w.Add(err), nil
} }
return names, w, nil return names, w, nil
} }
func (s *secondaryQuerier) Select(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet { func (s *secondaryQuerier) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
if s.done { if s.done {
panic("secondaryQuerier: Select invoked after first Next of any returned SeriesSet was done") panic("secondaryQuerier: Select invoked after first Next of any returned SeriesSet was done")
} }
s.asyncSets = append(s.asyncSets, s.genericQuerier.Select(sortSeries, hints, matchers...)) s.asyncSets = append(s.asyncSets, s.genericQuerier.Select(ctx, sortSeries, hints, matchers...))
curr := len(s.asyncSets) - 1 curr := len(s.asyncSets) - 1
return &lazyGenericSeriesSet{init: func() (genericSeriesSet, bool) { return &lazyGenericSeriesSet{init: func() (genericSeriesSet, bool) {
s.once.Do(func() { s.once.Do(func() {
@ -82,7 +84,7 @@ func (s *secondaryQuerier) Select(sortSeries bool, hints *SelectHints, matchers
if err := set.Err(); err != nil { if err := set.Err(); err != nil {
// One of the sets failed; ensure the current one returns its errors as warnings, and the rest of the sets return nothing. // One of the sets failed; ensure the current one returns its errors as warnings, and the rest of the sets return nothing.
// (All or nothing logic). // (All or nothing logic).
s.asyncSets[curr] = warningsOnlySeriesSet(append([]error{err}, ws...)) s.asyncSets[curr] = warningsOnlySeriesSet(ws.Add(err))
for i := range s.asyncSets { for i := range s.asyncSets {
if curr == i { if curr == i {
continue continue
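The `ws.Add(err)` above is what demotes a failing secondary querier to warnings instead of failing the whole query. A hedged sketch of what a caller observes:

package example

import (
	"context"

	"github.com/prometheus/prometheus/storage"
)

func namesBestEffort(ctx context.Context, q storage.LabelQuerier) ([]string, []error) {
	names, ws, err := q.LabelNames(ctx)
	if err != nil {
		// A primary querier can still fail hard.
		return nil, []error{err}
	}
	// A secondaryQuerier reports err == nil and folds its failure into ws,
	// so it surfaces here as warnings instead.
	return names, ws.AsErrors()
}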


@ -716,12 +716,12 @@ func (db *DB) StartTime() (int64, error) {
} }
// Querier implements the Storage interface. // Querier implements the Storage interface.
func (db *DB) Querier(context.Context, int64, int64) (storage.Querier, error) { func (db *DB) Querier(int64, int64) (storage.Querier, error) {
return nil, ErrUnsupported return nil, ErrUnsupported
} }
// ChunkQuerier implements the Storage interface. // ChunkQuerier implements the Storage interface.
func (db *DB) ChunkQuerier(context.Context, int64, int64) (storage.ChunkQuerier, error) { func (db *DB) ChunkQuerier(int64, int64) (storage.ChunkQuerier, error) {
return nil, ErrUnsupported return nil, ErrUnsupported
} }
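Reads against the agent's WAL-only storage still fail with the exported sentinel; only the signatures changed. A sketch of guarding on it (helper name assumed):

package example

import (
	"errors"

	"github.com/prometheus/prometheus/tsdb/agent"
)

func canQuery(db *agent.DB) bool {
	_, err := db.Querier(0, 0)
	// ErrUnsupported marks the agent as append-only; reads must be served elsewhere.
	return !errors.Is(err, agent.ErrUnsupported)
}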


@ -103,12 +103,12 @@ func TestUnsupportedFunctions(t *testing.T) {
defer s.Close() defer s.Close()
t.Run("Querier", func(t *testing.T) { t.Run("Querier", func(t *testing.T) {
_, err := s.Querier(context.TODO(), 0, 0) _, err := s.Querier(0, 0)
require.Equal(t, err, ErrUnsupported) require.Equal(t, err, ErrUnsupported)
}) })
t.Run("ChunkQuerier", func(t *testing.T) { t.Run("ChunkQuerier", func(t *testing.T) {
_, err := s.ChunkQuerier(context.TODO(), 0, 0) _, err := s.ChunkQuerier(0, 0)
require.Equal(t, err, ErrUnsupported) require.Equal(t, err, ErrUnsupported)
}) })


@ -15,6 +15,7 @@
package tsdb package tsdb
import ( import (
"context"
"encoding/json" "encoding/json"
"io" "io"
"os" "os"
@ -65,22 +66,22 @@ type IndexReader interface {
Symbols() index.StringIter Symbols() index.StringIter
// SortedLabelValues returns sorted possible label values. // SortedLabelValues returns sorted possible label values.
SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) SortedLabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error)
// LabelValues returns possible label values which may not be sorted. // LabelValues returns possible label values which may not be sorted.
LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error)
// Postings returns the postings list iterator for the label pairs. // Postings returns the postings list iterator for the label pairs.
// The Postings here contain the offsets to the series inside the index. // The Postings here contain the offsets to the series inside the index.
// Found IDs are not strictly required to point to a valid Series, e.g. // Found IDs are not strictly required to point to a valid Series, e.g.
// during background garbage collections. // during background garbage collections.
Postings(name string, values ...string) (index.Postings, error) Postings(ctx context.Context, name string, values ...string) (index.Postings, error)
// PostingsForMatchers assembles a single postings iterator based on the given matchers. // PostingsForMatchers assembles a single postings iterator based on the given matchers.
// The resulting postings are not ordered by series. // The resulting postings are not ordered by series.
// If the concurrent hint is set to true, the call will be optimized for a (most likely) concurrent call with the same matchers, // If the concurrent hint is set to true, the call will be optimized for a (most likely) concurrent call with the same matchers,
// avoiding the same calculations twice; however, this implementation may lead to worse performance when called only once. // avoiding the same calculations twice; however, this implementation may lead to worse performance when called only once.
PostingsForMatchers(concurrent bool, ms ...*labels.Matcher) (index.Postings, error) PostingsForMatchers(ctx context.Context, concurrent bool, ms ...*labels.Matcher) (index.Postings, error)
// SortedPostings returns a postings list that is reordered to be sorted // SortedPostings returns a postings list that is reordered to be sorted
// by the label set of the underlying series. // by the label set of the underlying series.
@ -97,16 +98,16 @@ type IndexReader interface {
Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error
// LabelNames returns all the unique label names present in the index in sorted order. // LabelNames returns all the unique label names present in the index in sorted order.
LabelNames(matchers ...*labels.Matcher) ([]string, error) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, error)
// LabelValueFor returns label value for the given label name in the series referred to by ID. // LabelValueFor returns label value for the given label name in the series referred to by ID.
// If the series couldn't be found or the series doesn't have the requested label a // If the series couldn't be found or the series doesn't have the requested label a
// storage.ErrNotFound is returned as error. // storage.ErrNotFound is returned as error.
LabelValueFor(id storage.SeriesRef, label string) (string, error) LabelValueFor(ctx context.Context, id storage.SeriesRef, label string) (string, error)
// LabelNamesFor returns all the label names for the series referred to by IDs. // LabelNamesFor returns all the label names for the series referred to by IDs.
// The names returned are sorted. // The names returned are sorted.
LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error)
// Close releases the underlying resources of the reader. // Close releases the underlying resources of the reader.
Close() error Close() error
@ -476,14 +477,14 @@ func (r blockIndexReader) Symbols() index.StringIter {
return r.ir.Symbols() return r.ir.Symbols()
} }
func (r blockIndexReader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { func (r blockIndexReader) SortedLabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
var st []string var st []string
var err error var err error
if len(matchers) == 0 { if len(matchers) == 0 {
st, err = r.ir.SortedLabelValues(name) st, err = r.ir.SortedLabelValues(ctx, name)
} else { } else {
st, err = r.LabelValues(name, matchers...) st, err = r.LabelValues(ctx, name, matchers...)
if err == nil { if err == nil {
slices.Sort(st) slices.Sort(st)
} }
@ -492,33 +493,33 @@ func (r blockIndexReader) SortedLabelValues(name string, matchers ...*labels.Mat
return st, errors.Wrapf(err, "block: %s", r.b.Meta().ULID) return st, errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
} }
func (r blockIndexReader) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { func (r blockIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
if len(matchers) == 0 { if len(matchers) == 0 {
st, err := r.ir.LabelValues(name) st, err := r.ir.LabelValues(ctx, name)
return st, errors.Wrapf(err, "block: %s", r.b.Meta().ULID) return st, errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
} }
return labelValuesWithMatchers(r.ir, name, matchers...) return labelValuesWithMatchers(ctx, r.ir, name, matchers...)
} }
func (r blockIndexReader) LabelNames(matchers ...*labels.Matcher) ([]string, error) { func (r blockIndexReader) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, error) {
if len(matchers) == 0 { if len(matchers) == 0 {
return r.b.LabelNames() return r.b.LabelNames(ctx)
} }
return labelNamesWithMatchers(r.ir, matchers...) return labelNamesWithMatchers(ctx, r.ir, matchers...)
} }
func (r blockIndexReader) Postings(name string, values ...string) (index.Postings, error) { func (r blockIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) {
p, err := r.ir.Postings(name, values...) p, err := r.ir.Postings(ctx, name, values...)
if err != nil { if err != nil {
return p, errors.Wrapf(err, "block: %s", r.b.Meta().ULID) return p, errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
} }
return p, nil return p, nil
} }
func (r blockIndexReader) PostingsForMatchers(concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { func (r blockIndexReader) PostingsForMatchers(ctx context.Context, concurrent bool, ms ...*labels.Matcher) (index.Postings, error) {
return r.ir.PostingsForMatchers(concurrent, ms...) return r.ir.PostingsForMatchers(ctx, concurrent, ms...)
} }
func (r blockIndexReader) SortedPostings(p index.Postings) index.Postings { func (r blockIndexReader) SortedPostings(p index.Postings) index.Postings {
@ -542,14 +543,14 @@ func (r blockIndexReader) Close() error {
} }
// LabelValueFor returns label value for the given label name in the series referred to by ID. // LabelValueFor returns label value for the given label name in the series referred to by ID.
func (r blockIndexReader) LabelValueFor(id storage.SeriesRef, label string) (string, error) { func (r blockIndexReader) LabelValueFor(ctx context.Context, id storage.SeriesRef, label string) (string, error) {
return r.ir.LabelValueFor(id, label) return r.ir.LabelValueFor(ctx, id, label)
} }
// LabelNamesFor returns all the label names for the series referred to by IDs. // LabelNamesFor returns all the label names for the series referred to by IDs.
// The names returned are sorted. // The names returned are sorted.
func (r blockIndexReader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) { func (r blockIndexReader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) {
return r.ir.LabelNamesFor(ids...) return r.ir.LabelNamesFor(ctx, ids...)
} }
type blockTombstoneReader struct { type blockTombstoneReader struct {
@ -573,7 +574,7 @@ func (r blockChunkReader) Close() error {
} }
// Delete matching series between mint and maxt in the block. // Delete matching series between mint and maxt in the block.
func (pb *Block) Delete(mint, maxt int64, ms ...*labels.Matcher) error { func (pb *Block) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Matcher) error {
pb.mtx.Lock() pb.mtx.Lock()
defer pb.mtx.Unlock() defer pb.mtx.Unlock()
@ -581,7 +582,7 @@ func (pb *Block) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
return ErrClosing return ErrClosing
} }
p, err := pb.indexr.PostingsForMatchers(false, ms...) p, err := pb.indexr.PostingsForMatchers(ctx, false, ms...)
if err != nil { if err != nil {
return errors.Wrap(err, "select series") return errors.Wrap(err, "select series")
} }
@ -715,8 +716,8 @@ func (pb *Block) OverlapsClosedInterval(mint, maxt int64) bool {
} }
// LabelNames returns all the unique label names present in the Block in sorted order. // LabelNames returns all the unique label names present in the Block in sorted order.
func (pb *Block) LabelNames() ([]string, error) { func (pb *Block) LabelNames(ctx context.Context) ([]string, error) {
return pb.indexr.LabelNames() return pb.indexr.LabelNames(ctx)
} }
func clampInterval(a, b, mint, maxt int64) (int64, int64) { func clampInterval(a, b, mint, maxt int64) (int64, int64) {
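Every `IndexReader` lookup now carries a context, so long index scans can be abandoned. A hedged sketch against the exported interface (helper name assumed):

package example

import (
	"context"

	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/index"
)

func countSeries(ctx context.Context, ir tsdb.IndexReader) (int, error) {
	k, v := index.AllPostingsKey()
	p, err := ir.Postings(ctx, k, v) // index lookups are now cancellable
	if err != nil {
		return 0, err
	}
	n := 0
	for p.Next() {
		n++
	}
	return n, p.Err()
}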


@ -198,7 +198,7 @@ func TestCorruptedChunk(t *testing.T) {
querier, err := NewBlockQuerier(b, 0, 1) querier, err := NewBlockQuerier(b, 0, 1)
require.NoError(t, err) require.NoError(t, err)
defer func() { require.NoError(t, querier.Close()) }() defer func() { require.NoError(t, querier.Close()) }()
set := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) set := querier.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
// Check chunk errors during iter time. // Check chunk errors during iter time.
require.True(t, set.Next()) require.True(t, set.Next())
@ -211,6 +211,7 @@ func TestCorruptedChunk(t *testing.T) {
func TestLabelValuesWithMatchers(t *testing.T) { func TestLabelValuesWithMatchers(t *testing.T) {
tmpdir := t.TempDir() tmpdir := t.TempDir()
ctx := context.Background()
var seriesEntries []storage.Series var seriesEntries []storage.Series
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
@ -265,11 +266,11 @@ func TestLabelValuesWithMatchers(t *testing.T) {
for _, tt := range testCases { for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
actualValues, err := indexReader.SortedLabelValues(tt.labelName, tt.matchers...) actualValues, err := indexReader.SortedLabelValues(ctx, tt.labelName, tt.matchers...)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, tt.expectedValues, actualValues) require.Equal(t, tt.expectedValues, actualValues)
actualValues, err = indexReader.LabelValues(tt.labelName, tt.matchers...) actualValues, err = indexReader.LabelValues(ctx, tt.labelName, tt.matchers...)
sort.Strings(actualValues) sort.Strings(actualValues)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, tt.expectedValues, actualValues) require.Equal(t, tt.expectedValues, actualValues)
@ -304,7 +305,7 @@ func TestBlockSize(t *testing.T) {
// Delete some series and check the sizes again. // Delete some series and check the sizes again.
{ {
require.NoError(t, blockInit.Delete(1, 10, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))) require.NoError(t, blockInit.Delete(context.Background(), 1, 10, labels.MustNewMatcher(labels.MatchRegexp, "", ".*")))
expAfterDelete := blockInit.Size() expAfterDelete := blockInit.Size()
require.Greater(t, expAfterDelete, expSizeInit, "after a delete the block size should be bigger as the tombstone file should grow %v > %v", expAfterDelete, expSizeInit) require.Greater(t, expAfterDelete, expSizeInit, "after a delete the block size should be bigger as the tombstone file should grow %v > %v", expAfterDelete, expSizeInit)
actAfterDelete, err := fileutil.DirSize(blockDirInit) actAfterDelete, err := fileutil.DirSize(blockDirInit)
@ -368,6 +369,7 @@ func TestReadIndexFormatV1(t *testing.T) {
func BenchmarkLabelValuesWithMatchers(b *testing.B) { func BenchmarkLabelValuesWithMatchers(b *testing.B) {
tmpdir := b.TempDir() tmpdir := b.TempDir()
ctx := context.Background()
var seriesEntries []storage.Series var seriesEntries []storage.Series
metricCount := 1000000 metricCount := 1000000
@ -401,7 +403,7 @@ func BenchmarkLabelValuesWithMatchers(b *testing.B) {
b.ReportAllocs() b.ReportAllocs()
for benchIdx := 0; benchIdx < b.N; benchIdx++ { for benchIdx := 0; benchIdx < b.N; benchIdx++ {
actualValues, err := indexReader.LabelValues("b_tens", matchers...) actualValues, err := indexReader.LabelValues(ctx, "b_tens", matchers...)
require.NoError(b, err) require.NoError(b, err)
require.Equal(b, 9, len(actualValues)) require.Equal(b, 9, len(actualValues))
} }
@ -409,6 +411,7 @@ func BenchmarkLabelValuesWithMatchers(b *testing.B) {
func TestLabelNamesWithMatchers(t *testing.T) { func TestLabelNamesWithMatchers(t *testing.T) {
tmpdir := t.TempDir() tmpdir := t.TempDir()
ctx := context.Background()
var seriesEntries []storage.Series var seriesEntries []storage.Series
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
@ -474,7 +477,7 @@ func TestLabelNamesWithMatchers(t *testing.T) {
for _, tt := range testCases { for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
actualNames, err := indexReader.LabelNames(tt.matchers...) actualNames, err := indexReader.LabelNames(ctx, tt.matchers...)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, tt.expectedNames, actualNames) require.Equal(t, tt.expectedNames, actualNames)
}) })


@ -1009,7 +1009,7 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa
closers = append(closers, tombsr) closers = append(closers, tombsr)
k, v := index.AllPostingsKey() k, v := index.AllPostingsKey()
all, err := indexr.Postings(k, v) all, err := indexr.Postings(ctx, k, v)
if err != nil { if err != nil {
return err return err
} }
@ -1021,7 +1021,7 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa
// To iterate series when populating symbols, we cannot reuse postings we just got, but need to get a new copy. // To iterate series when populating symbols, we cannot reuse postings we just got, but need to get a new copy.
// Postings can only be iterated once. // Postings can only be iterated once.
k, v = index.AllPostingsKey() k, v = index.AllPostingsKey()
all, err = indexr.Postings(k, v) all, err = indexr.Postings(ctx, k, v)
if err != nil { if err != nil {
return err return err
} }


@ -506,6 +506,7 @@ func samplesForRange(minTime, maxTime int64, maxSamplesPerChunk int) (ret [][]sa
func TestCompaction_CompactWithSplitting(t *testing.T) { func TestCompaction_CompactWithSplitting(t *testing.T) {
seriesCounts := []int{10, 1234} seriesCounts := []int{10, 1234}
shardCounts := []uint64{1, 13} shardCounts := []uint64{1, 13}
ctx := context.Background()
for _, series := range seriesCounts { for _, series := range seriesCounts {
dir, err := os.MkdirTemp("", "compact") dir, err := os.MkdirTemp("", "compact")
@ -533,7 +534,7 @@ func TestCompaction_CompactWithSplitting(t *testing.T) {
for _, shardCount := range shardCounts { for _, shardCount := range shardCounts {
t.Run(fmt.Sprintf("series=%d, shards=%d", series, shardCount), func(t *testing.T) { t.Run(fmt.Sprintf("series=%d, shards=%d", series, shardCount), func(t *testing.T) {
c, err := NewLeveledCompactorWithChunkSize(context.Background(), nil, log.NewNopLogger(), []int64{0}, nil, chunks.DefaultChunkSegmentSize, nil, true) c, err := NewLeveledCompactorWithChunkSize(ctx, nil, log.NewNopLogger(), []int64{0}, nil, chunks.DefaultChunkSegmentSize, nil, true)
require.NoError(t, err) require.NoError(t, err)
blockIDs, err := c.CompactWithSplitting(dir, blockDirs, openBlocks, shardCount) blockIDs, err := c.CompactWithSplitting(dir, blockDirs, openBlocks, shardCount)
@ -585,7 +586,7 @@ func TestCompaction_CompactWithSplitting(t *testing.T) {
}() }()
k, v := index.AllPostingsKey() k, v := index.AllPostingsKey()
p, err := idxr.Postings(k, v) p, err := idxr.Postings(ctx, k, v)
require.NoError(t, err) require.NoError(t, err)
var lbls labels.ScratchBuilder var lbls labels.ScratchBuilder
@ -1471,6 +1472,8 @@ func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) {
for title, bootStrap := range tests { for title, bootStrap := range tests {
t.Run(title, func(t *testing.T) { t.Run(title, func(t *testing.T) {
ctx := context.Background()
db := openTestDB(t, nil, []int64{1, 100}) db := openTestDB(t, nil, []int64{1, 100})
defer func() { defer func() {
require.NoError(t, db.Close()) require.NoError(t, db.Close())
@ -1494,7 +1497,7 @@ func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) {
// Do the compaction and check the metrics. // Do the compaction and check the metrics.
// Compaction should succeed, but the reloadBlocks should fail and // Compaction should succeed, but the reloadBlocks should fail and
// the new block created from the compaction should be deleted. // the new block created from the compaction should be deleted.
require.Error(t, db.Compact()) require.Error(t, db.Compact(ctx))
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.metrics.reloadsFailed), "'failed db reloadBlocks' count metrics mismatch") require.Equal(t, 1.0, prom_testutil.ToFloat64(db.metrics.reloadsFailed), "'failed db reloadBlocks' count metrics mismatch")
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran), "`compaction` count metric mismatch") require.Equal(t, 1.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran), "`compaction` count metric mismatch")
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.metrics.compactionsFailed), "`compactions failed` count metric mismatch") require.Equal(t, 1.0, prom_testutil.ToFloat64(db.metrics.compactionsFailed), "`compactions failed` count metric mismatch")
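DB.Compact takes a context now too, as the failed-reload test above exercises. A sketch of bounding a manual compaction with a deadline; the function name and timeout are illustrative:

package example

import (
	"context"
	"time"

	"github.com/prometheus/prometheus/tsdb"
)

// compactWithDeadline is a sketch: the compaction gives up once the
// deadline expires instead of running unbounded.
func compactWithDeadline(db *tsdb.DB) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	return db.Compact(ctx)
}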


@ -567,22 +567,22 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue
// Querier loads the blocks and wal and returns a new querier over the data partition for the given time range. // Querier loads the blocks and wal and returns a new querier over the data partition for the given time range.
// Current implementation doesn't support multiple Queriers. // Current implementation doesn't support multiple Queriers.
func (db *DBReadOnly) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { func (db *DBReadOnly) Querier(mint, maxt int64) (storage.Querier, error) {
q, err := db.loadDataAsQueryable(maxt) q, err := db.loadDataAsQueryable(maxt)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return q.Querier(ctx, mint, maxt) return q.Querier(mint, maxt)
} }
// ChunkQuerier loads blocks and the wal and returns a new chunk querier over the data partition for the given time range. // ChunkQuerier loads blocks and the wal and returns a new chunk querier over the data partition for the given time range.
// Current implementation doesn't support multiple ChunkQueriers. // Current implementation doesn't support multiple ChunkQueriers.
func (db *DBReadOnly) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { func (db *DBReadOnly) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) {
q, err := db.loadDataAsQueryable(maxt) q, err := db.loadDataAsQueryable(maxt)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return q.ChunkQuerier(ctx, mint, maxt) return q.ChunkQuerier(mint, maxt)
} }
// Blocks returns a slice of block readers for persisted blocks. // Blocks returns a slice of block readers for persisted blocks.
@ -956,7 +956,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
db.oooWasEnabled.Store(true) db.oooWasEnabled.Store(true)
} }
go db.run() go db.run(ctx)
return db, nil return db, nil
} }
@ -997,7 +997,7 @@ func (db *DB) Dir() string {
return db.dir return db.dir
} }
func (db *DB) run() { func (db *DB) run(ctx context.Context) {
defer close(db.donec) defer close(db.donec)
backoff := time.Duration(0) backoff := time.Duration(0)
@ -1028,7 +1028,7 @@ func (db *DB) run() {
db.autoCompactMtx.Lock() db.autoCompactMtx.Lock()
if db.autoCompact { if db.autoCompact {
if err := db.Compact(); err != nil { if err := db.Compact(ctx); err != nil {
level.Error(db.logger).Log("msg", "compaction failed", "err", err) level.Error(db.logger).Log("msg", "compaction failed", "err", err)
backoff = exponential(backoff, 1*time.Second, 1*time.Minute) backoff = exponential(backoff, 1*time.Second, 1*time.Minute)
} else { } else {
@ -1148,7 +1148,7 @@ func (a dbAppender) Commit() error {
// which will also delete the blocks that fall out of the retention window. // which will also delete the blocks that fall out of the retention window.
// Old blocks are only deleted on reloadBlocks based on the new block's parent information. // Old blocks are only deleted on reloadBlocks based on the new block's parent information.
// See DB.reloadBlocks documentation for further information. // See DB.reloadBlocks documentation for further information.
func (db *DB) Compact() (returnErr error) { func (db *DB) Compact(ctx context.Context) (returnErr error) {
db.cmtx.Lock() db.cmtx.Lock()
defer db.cmtx.Unlock() defer db.cmtx.Unlock()
defer func() { defer func() {
@ -1221,7 +1221,7 @@ func (db *DB) Compact() (returnErr error) {
if lastBlockMaxt != math.MinInt64 { if lastBlockMaxt != math.MinInt64 {
// The head was compacted, so we compact OOO head as well. // The head was compacted, so we compact OOO head as well.
if err := db.compactOOOHead(); err != nil { if err := db.compactOOOHead(ctx); err != nil {
return errors.Wrap(err, "compact ooo head") return errors.Wrap(err, "compact ooo head")
} }
} }
@ -1245,18 +1245,18 @@ func (db *DB) CompactHead(head *RangeHead) error {
} }
// CompactOOOHead compacts the OOO Head. // CompactOOOHead compacts the OOO Head.
func (db *DB) CompactOOOHead() error { func (db *DB) CompactOOOHead(ctx context.Context) error {
db.cmtx.Lock() db.cmtx.Lock()
defer db.cmtx.Unlock() defer db.cmtx.Unlock()
return db.compactOOOHead() return db.compactOOOHead(ctx)
} }
func (db *DB) compactOOOHead() error { func (db *DB) compactOOOHead(ctx context.Context) error {
if !db.oooWasEnabled.Load() { if !db.oooWasEnabled.Load() {
return nil return nil
} }
oooHead, err := NewOOOCompactionHead(db.head) oooHead, err := NewOOOCompactionHead(ctx, db.head)
if err != nil { if err != nil {
return errors.Wrap(err, "get ooo compaction head") return errors.Wrap(err, "get ooo compaction head")
} }
@ -1894,7 +1894,7 @@ func (db *DB) Snapshot(dir string, withHead bool) error {
} }
// Querier returns a new querier over the data partition for the given time range. // Querier returns a new querier over the data partition for the given time range.
func (db *DB) Querier(_ context.Context, mint, maxt int64) (storage.Querier, error) { func (db *DB) Querier(mint, maxt int64) (storage.Querier, error) {
var blocks []BlockReader var blocks []BlockReader
db.mtx.RLock() db.mtx.RLock()
@ -2042,7 +2042,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) ([]storage.ChunkQuerie
} }
// ChunkQuerier returns a new chunk querier over the data partition for the given time range. // ChunkQuerier returns a new chunk querier over the data partition for the given time range.
func (db *DB) ChunkQuerier(_ context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { func (db *DB) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) {
blockQueriers, err := db.blockChunkQuerierForRange(mint, maxt) blockQueriers, err := db.blockChunkQuerierForRange(mint, maxt)
if err != nil { if err != nil {
return nil, err return nil, err
@ -2069,7 +2069,7 @@ func rangeForTimestamp(t, width int64) (maxt int64) {
} }
// Delete implements deletion of metrics. It only has atomicity guarantees on a per-block basis. // Delete implements deletion of metrics. It only has atomicity guarantees on a per-block basis.
func (db *DB) Delete(mint, maxt int64, ms ...*labels.Matcher) error { func (db *DB) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Matcher) error {
db.cmtx.Lock() db.cmtx.Lock()
defer db.cmtx.Unlock() defer db.cmtx.Unlock()
@ -2081,13 +2081,13 @@ func (db *DB) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
for _, b := range db.blocks { for _, b := range db.blocks {
if b.OverlapsClosedInterval(mint, maxt) { if b.OverlapsClosedInterval(mint, maxt) {
g.Go(func(b *Block) func() error { g.Go(func(b *Block) func() error {
return func() error { return b.Delete(mint, maxt, ms...) } return func() error { return b.Delete(ctx, mint, maxt, ms...) }
}(b)) }(b))
} }
} }
if db.head.OverlapsClosedInterval(mint, maxt) { if db.head.OverlapsClosedInterval(mint, maxt) {
g.Go(func() error { g.Go(func() error {
return db.head.Delete(mint, maxt, ms...) return db.head.Delete(ctx, mint, maxt, ms...)
}) })
} }
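Taken together, the db.go changes move the context from querier construction onto the individual operations (Select, Delete, Compact). A sketch of the resulting read path; the matcher is illustrative:

package example

import (
	"context"
	"fmt"
	"math"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/tsdb"
)

// queryAll is a sketch: the querier is opened without a context, and each
// Select carries its own, so one querier can serve independently
// cancellable reads.
func queryAll(ctx context.Context, db *tsdb.DB) error {
	q, err := db.Querier(math.MinInt64, math.MaxInt64) // no context here any more
	if err != nil {
		return err
	}
	defer q.Close()

	ss := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
	for ss.Next() {
		fmt.Println(ss.At().Labels())
	}
	return ss.Err()
}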

File diff suppressed because it is too large


@ -59,9 +59,9 @@ func Example() {
// ... adding more samples. // ... adding more samples.
// Open a querier for reading. // Open a querier for reading.
querier, err := db.Querier(context.Background(), math.MinInt64, math.MaxInt64) querier, err := db.Querier(math.MinInt64, math.MaxInt64)
noErr(err) noErr(err)
ss := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) ss := querier.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
for ss.Next() { for ss.Next() {
series := ss.At() series := ss.At()
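The chunk-level path mirrors this: ChunkQuerier is built without a context and Select receives one. A sketch under the same assumptions; names and the matcher are illustrative:

package example

import (
	"context"
	"fmt"
	"math"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/tsdb"
)

// iterateChunks is a sketch: it reads raw chunk metadata instead of
// decoded samples.
func iterateChunks(ctx context.Context, db *tsdb.DB) error {
	cq, err := db.ChunkQuerier(math.MinInt64, math.MaxInt64)
	if err != nil {
		return err
	}
	defer cq.Close()

	css := cq.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
	for css.Next() {
		series := css.At()
		it := series.Iterator(nil)
		for it.Next() {
			meta := it.At() // chunks.Meta: MinTime, MaxTime and the raw chunk
			fmt.Println(series.Labels(), meta.MinTime, meta.MaxTime)
		}
	}
	return css.Err()
}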


@ -14,6 +14,7 @@
package tsdb package tsdb
import ( import (
"context"
"fmt" "fmt"
"io" "io"
"math" "math"
@ -1453,19 +1454,23 @@ func (h *RangeHead) String() string {
// Delete all samples in the range of [mint, maxt] for series that satisfy the given // Delete all samples in the range of [mint, maxt] for series that satisfy the given
// label matchers. // label matchers.
func (h *Head) Delete(mint, maxt int64, ms ...*labels.Matcher) error { func (h *Head) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Matcher) error {
// Do not delete anything beyond the currently valid range. // Do not delete anything beyond the currently valid range.
mint, maxt = clampInterval(mint, maxt, h.MinTime(), h.MaxTime()) mint, maxt = clampInterval(mint, maxt, h.MinTime(), h.MaxTime())
ir := h.indexRange(mint, maxt) ir := h.indexRange(mint, maxt)
p, err := ir.PostingsForMatchers(false, ms...) p, err := ir.PostingsForMatchers(ctx, false, ms...)
if err != nil { if err != nil {
return errors.Wrap(err, "select series") return errors.Wrap(err, "select series")
} }
var stones []tombstones.Stone var stones []tombstones.Stone
for p.Next() { for p.Next() {
if err := ctx.Err(); err != nil {
return errors.Wrap(err, "select series")
}
series := h.series.getByID(chunks.HeadSeriesRef(p.At())) series := h.series.getByID(chunks.HeadSeriesRef(p.At()))
if series == nil { if series == nil {
level.Debug(h.logger).Log("msg", "Series not found in Head.Delete") level.Debug(h.logger).Log("msg", "Series not found in Head.Delete")
@ -1485,6 +1490,10 @@ func (h *Head) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
if p.Err() != nil { if p.Err() != nil {
return p.Err() return p.Err()
} }
if ctx.Err() != nil {
return errors.Wrap(ctx.Err(), "select series")
}
if h.wal != nil { if h.wal != nil {
var enc record.Encoder var enc record.Encoder
if err := h.wal.Log(enc.Tombstones(stones, nil)); err != nil { if err := h.wal.Log(enc.Tombstones(stones, nil)); err != nil {
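Head.Delete and DB.Delete now accept a context and check it while walking the matched postings, so a deletion over many series can be bounded. An illustrative sketch (matcher and timeout are made up):

package example

import (
	"context"
	"time"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/tsdb"
)

// deleteBounded is a sketch: it tombstones all samples of job="demo" in
// [mint, maxt], aborting if the deletion takes longer than 30 seconds.
func deleteBounded(db *tsdb.DB, mint, maxt int64) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	return db.Delete(ctx, mint, maxt, labels.MustNewMatcher(labels.MatchEqual, "job", "demo"))
}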


@ -62,8 +62,8 @@ func (h *headIndexReader) Symbols() index.StringIter {
// specific label name that are within the time range mint to maxt. // specific label name that are within the time range mint to maxt.
// If matchers are specified the returned result set is reduced // If matchers are specified the returned result set is reduced
// to label values of metrics matching the matchers. // to label values of metrics matching the matchers.
func (h *headIndexReader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { func (h *headIndexReader) SortedLabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
values, err := h.LabelValues(name, matchers...) values, err := h.LabelValues(ctx, name, matchers...)
if err == nil { if err == nil {
slices.Sort(values) slices.Sort(values)
} }
@ -74,21 +74,21 @@ func (h *headIndexReader) SortedLabelValues(name string, matchers ...*labels.Mat
// specific label name that are within the time range mint to maxt. // specific label name that are within the time range mint to maxt.
// If matchers are specified the returned result set is reduced // If matchers are specified the returned result set is reduced
// to label values of metrics matching the matchers. // to label values of metrics matching the matchers.
func (h *headIndexReader) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { func (h *headIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
if h.maxt < h.head.MinTime() || h.mint > h.head.MaxTime() { if h.maxt < h.head.MinTime() || h.mint > h.head.MaxTime() {
return []string{}, nil return []string{}, nil
} }
if len(matchers) == 0 { if len(matchers) == 0 {
return h.head.postings.LabelValues(name), nil return h.head.postings.LabelValues(ctx, name), nil
} }
return labelValuesWithMatchers(h, name, matchers...) return labelValuesWithMatchers(ctx, h, name, matchers...)
} }
// LabelNames returns all the unique label names present in the head // LabelNames returns all the unique label names present in the head
// that are within the time range mint to maxt. // that are within the time range mint to maxt.
func (h *headIndexReader) LabelNames(matchers ...*labels.Matcher) ([]string, error) { func (h *headIndexReader) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, error) {
if h.maxt < h.head.MinTime() || h.mint > h.head.MaxTime() { if h.maxt < h.head.MinTime() || h.mint > h.head.MaxTime() {
return []string{}, nil return []string{}, nil
} }
@ -99,11 +99,11 @@ func (h *headIndexReader) LabelNames(matchers ...*labels.Matcher) ([]string, err
return labelNames, nil return labelNames, nil
} }
return labelNamesWithMatchers(h, matchers...) return labelNamesWithMatchers(ctx, h, matchers...)
} }
// Postings returns the postings list iterator for the label pairs. // Postings returns the postings list iterator for the label pairs.
func (h *headIndexReader) Postings(name string, values ...string) (index.Postings, error) { func (h *headIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) {
switch len(values) { switch len(values) {
case 0: case 0:
return index.EmptyPostings(), nil return index.EmptyPostings(), nil
@ -116,12 +116,12 @@ func (h *headIndexReader) Postings(name string, values ...string) (index.Posting
res = append(res, p) res = append(res, p)
} }
} }
return index.Merge(res...), nil return index.Merge(ctx, res...), nil
} }
} }
func (h *headIndexReader) PostingsForMatchers(concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { func (h *headIndexReader) PostingsForMatchers(ctx context.Context, concurrent bool, ms ...*labels.Matcher) (index.Postings, error) {
return h.head.pfmc.PostingsForMatchers(h, concurrent, ms...) return h.head.pfmc.PostingsForMatchers(ctx, h, concurrent, ms...)
} }
func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings { func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings {
@ -245,7 +245,7 @@ func (s *memSeries) oooHeadChunkID(pos int) chunks.HeadChunkID {
} }
// LabelValueFor returns label value for the given label name in the series referred to by ID. // LabelValueFor returns label value for the given label name in the series referred to by ID.
func (h *headIndexReader) LabelValueFor(id storage.SeriesRef, label string) (string, error) { func (h *headIndexReader) LabelValueFor(_ context.Context, id storage.SeriesRef, label string) (string, error) {
memSeries := h.head.series.getByID(chunks.HeadSeriesRef(id)) memSeries := h.head.series.getByID(chunks.HeadSeriesRef(id))
if memSeries == nil { if memSeries == nil {
return "", storage.ErrNotFound return "", storage.ErrNotFound
@ -261,9 +261,12 @@ func (h *headIndexReader) LabelValueFor(id storage.SeriesRef, label string) (str
// LabelNamesFor returns all the label names for the series referred to by IDs. // LabelNamesFor returns all the label names for the series referred to by IDs.
// The names returned are sorted. // The names returned are sorted.
func (h *headIndexReader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) { func (h *headIndexReader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) {
namesMap := make(map[string]struct{}) namesMap := make(map[string]struct{})
for _, id := range ids { for _, id := range ids {
if ctx.Err() != nil {
return nil, ctx.Err()
}
memSeries := h.head.series.getByID(chunks.HeadSeriesRef(id)) memSeries := h.head.series.getByID(chunks.HeadSeriesRef(id))
if memSeries == nil { if memSeries == nil {
return nil, storage.ErrNotFound return nil, storage.ErrNotFound
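index.Merge gains the context as well (see the Postings hunk above). A self-contained sketch using list postings as stand-in inputs:

package example

import (
	"context"

	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/index"
)

// mergedRefs is a sketch: it merges two sorted postings lists through the
// context-aware Merge and expands the result into series refs.
func mergedRefs(ctx context.Context) ([]storage.SeriesRef, error) {
	a := index.NewListPostings([]storage.SeriesRef{1, 2, 5})
	b := index.NewListPostings([]storage.SeriesRef{2, 3, 5})
	return index.ExpandPostings(index.Merge(ctx, a, b))
}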


@ -135,7 +135,7 @@ func BenchmarkHeadAppender_Append_Commit_ExistingSeries(b *testing.B) {
} }
} }
func populateTestWAL(t testing.TB, w *wlog.WL, recs []interface{}) { func populateTestWL(t testing.TB, w *wlog.WL, recs []interface{}) {
var enc record.Encoder var enc record.Encoder
for _, r := range recs { for _, r := range recs {
switch v := r.(type) { switch v := r.(type) {
@ -147,6 +147,8 @@ func populateTestWAL(t testing.TB, w *wlog.WL, recs []interface{}) {
require.NoError(t, w.Log(enc.Tombstones(v, nil))) require.NoError(t, w.Log(enc.Tombstones(v, nil)))
case []record.RefExemplar: case []record.RefExemplar:
require.NoError(t, w.Log(enc.Exemplars(v, nil))) require.NoError(t, w.Log(enc.Exemplars(v, nil)))
case []record.RefMmapMarker:
require.NoError(t, w.Log(enc.MmapMarkers(v, nil)))
} }
} }
} }
@ -197,13 +199,18 @@ func readTestWAL(t testing.TB, dir string) (recs []interface{}) {
return recs return recs
} }
func BenchmarkLoadWAL(b *testing.B) { func BenchmarkLoadWLs(b *testing.B) {
cases := []struct { cases := []struct {
// Total series is (batches*seriesPerBatch). // Total series is (batches*seriesPerBatch).
batches int batches int
seriesPerBatch int seriesPerBatch int
samplesPerSeries int samplesPerSeries int
mmappedChunkT int64 mmappedChunkT int64
// The first oooSeriesPct*seriesPerBatch series in a batch are selected as "OOO" series.
oooSeriesPct float64
// The first oooSamplesPct*samplesPerSeries samples in an OOO series are written as OOO samples.
oooSamplesPct float64
oooCapMax int64
}{ }{
{ // Less series and more samples. 2 hour WAL with 1 second scrape interval. { // Less series and more samples. 2 hour WAL with 1 second scrape interval.
batches: 10, batches: 10,
@ -226,6 +233,31 @@ func BenchmarkLoadWAL(b *testing.B) {
samplesPerSeries: 480, samplesPerSeries: 480,
mmappedChunkT: 3800, mmappedChunkT: 3800,
}, },
{ // A lot of OOO samples (50% series with 50% of samples being OOO).
batches: 10,
seriesPerBatch: 1000,
samplesPerSeries: 480,
oooSeriesPct: 0.5,
oooSamplesPct: 0.5,
oooCapMax: DefaultOutOfOrderCapMax,
},
{ // Fewer OOO samples (10% of series with 10% of samples being OOO).
batches: 10,
seriesPerBatch: 1000,
samplesPerSeries: 480,
oooSeriesPct: 0.1,
oooSamplesPct: 0.1,
},
{ // 2 hour WAL with 15 second scrape interval, and mmapped chunks up to last 100 samples.
// Four mmap markers per OOO series: 480 * 0.3 = 144 OOO samples, and a marker is written every 32 samples (DefaultOutOfOrderCapMax), i.e. at i = 32, 64, 96 and 128.
batches: 100,
seriesPerBatch: 1000,
samplesPerSeries: 480,
mmappedChunkT: 3800,
oooSeriesPct: 0.2,
oooSamplesPct: 0.3,
oooCapMax: DefaultOutOfOrderCapMax,
},
} }
labelsPerSeries := 5 labelsPerSeries := 5
@ -241,12 +273,17 @@ func BenchmarkLoadWAL(b *testing.B) {
continue continue
} }
lastExemplarsPerSeries = exemplarsPerSeries lastExemplarsPerSeries = exemplarsPerSeries
b.Run(fmt.Sprintf("batches=%d,seriesPerBatch=%d,samplesPerSeries=%d,exemplarsPerSeries=%d,mmappedChunkT=%d", c.batches, c.seriesPerBatch, c.samplesPerSeries, exemplarsPerSeries, c.mmappedChunkT), b.Run(fmt.Sprintf("batches=%d,seriesPerBatch=%d,samplesPerSeries=%d,exemplarsPerSeries=%d,mmappedChunkT=%d,oooSeriesPct=%.3f,oooSamplesPct=%.3f,oooCapMax=%d", c.batches, c.seriesPerBatch, c.samplesPerSeries, exemplarsPerSeries, c.mmappedChunkT, c.oooSeriesPct, c.oooSamplesPct, c.oooCapMax),
func(b *testing.B) { func(b *testing.B) {
dir := b.TempDir() dir := b.TempDir()
w, err := wlog.New(nil, nil, dir, wlog.CompressionNone) wal, err := wlog.New(nil, nil, dir, wlog.CompressionNone)
require.NoError(b, err) require.NoError(b, err)
var wbl *wlog.WL
if c.oooSeriesPct != 0 {
wbl, err = wlog.New(nil, nil, dir, wlog.CompressionNone)
require.NoError(b, err)
}
// Write series. // Write series.
refSeries := make([]record.RefSeries, 0, c.seriesPerBatch) refSeries := make([]record.RefSeries, 0, c.seriesPerBatch)
@ -260,22 +297,33 @@ func BenchmarkLoadWAL(b *testing.B) {
} }
refSeries = append(refSeries, record.RefSeries{Ref: chunks.HeadSeriesRef(i) * 101, Labels: labels.FromMap(lbls)}) refSeries = append(refSeries, record.RefSeries{Ref: chunks.HeadSeriesRef(i) * 101, Labels: labels.FromMap(lbls)})
} }
populateTestWAL(b, w, []interface{}{refSeries}) populateTestWL(b, wal, []interface{}{refSeries})
} }
// Write samples. // Write samples.
refSamples := make([]record.RefSample, 0, c.seriesPerBatch) refSamples := make([]record.RefSample, 0, c.seriesPerBatch)
oooSeriesPerBatch := int(float64(c.seriesPerBatch) * c.oooSeriesPct)
oooSamplesPerSeries := int(float64(c.samplesPerSeries) * c.oooSamplesPct)
for i := 0; i < c.samplesPerSeries; i++ { for i := 0; i < c.samplesPerSeries; i++ {
for j := 0; j < c.batches; j++ { for j := 0; j < c.batches; j++ {
refSamples = refSamples[:0] refSamples = refSamples[:0]
for k := j * c.seriesPerBatch; k < (j+1)*c.seriesPerBatch; k++ {
k := j * c.seriesPerBatch
// Skip appending the first oooSamplesPerSeries samples for the series in the batch that
// should have OOO samples. OOO samples are appended after all the in-order samples.
if i < oooSamplesPerSeries {
k += oooSeriesPerBatch
}
for ; k < (j+1)*c.seriesPerBatch; k++ {
refSamples = append(refSamples, record.RefSample{ refSamples = append(refSamples, record.RefSample{
Ref: chunks.HeadSeriesRef(k) * 101, Ref: chunks.HeadSeriesRef(k) * 101,
T: int64(i) * 10, T: int64(i) * 10,
V: float64(i) * 100, V: float64(i) * 100,
}) })
} }
populateTestWAL(b, w, []interface{}{refSamples}) populateTestWL(b, wal, []interface{}{refSamples})
} }
} }
@ -293,6 +341,10 @@ func BenchmarkLoadWAL(b *testing.B) {
lbls := labels.Labels{} lbls := labels.Labels{}
s := newMemSeries(lbls, chunks.HeadSeriesRef(k)*101, labels.StableHash(lbls), 0, defaultIsolationDisabled) s := newMemSeries(lbls, chunks.HeadSeriesRef(k)*101, labels.StableHash(lbls), 0, defaultIsolationDisabled)
s.append(c.mmappedChunkT, 42, 0, cOpts) s.append(c.mmappedChunkT, 42, 0, cOpts)
// There's only one head chunk because only a single sample is appended. mmapChunks()
// ignores the latest chunk, so we need to cut a new head chunk to guarantee the chunk with
// the sample at c.mmappedChunkT is mmapped.
s.cutNewHeadChunk(c.mmappedChunkT, chunkenc.EncXOR, c.mmappedChunkT)
s.mmapChunks(chunkDiskMapper) s.mmapChunks(chunkDiskMapper)
} }
require.NoError(b, chunkDiskMapper.Close()) require.NoError(b, chunkDiskMapper.Close())
@ -311,7 +363,39 @@ func BenchmarkLoadWAL(b *testing.B) {
Labels: labels.FromStrings("traceID", fmt.Sprintf("trace-%d", i)), Labels: labels.FromStrings("traceID", fmt.Sprintf("trace-%d", i)),
}) })
} }
populateTestWAL(b, w, []interface{}{refExemplars}) populateTestWL(b, wal, []interface{}{refExemplars})
}
}
// Write OOO samples and mmap markers.
refMarkers := make([]record.RefMmapMarker, 0, oooSeriesPerBatch)
refSamples = make([]record.RefSample, 0, oooSeriesPerBatch)
for i := 0; i < oooSamplesPerSeries; i++ {
shouldAddMarkers := c.oooCapMax != 0 && i != 0 && int64(i)%c.oooCapMax == 0
for j := 0; j < c.batches; j++ {
refSamples = refSamples[:0]
if shouldAddMarkers {
refMarkers = refMarkers[:0]
}
for k := j * c.seriesPerBatch; k < (j*c.seriesPerBatch)+oooSeriesPerBatch; k++ {
ref := chunks.HeadSeriesRef(k) * 101
if shouldAddMarkers {
// loadWBL() checks that the marker's MmapRef is less than or equal to the ref
// for the last mmap chunk. We set MmapRef to 0 so that check always passes.
refMarkers = append(refMarkers, record.RefMmapMarker{Ref: ref, MmapRef: 0})
}
refSamples = append(refSamples, record.RefSample{
Ref: ref,
T: int64(i) * 10,
V: float64(i) * 100,
})
}
if shouldAddMarkers {
populateTestWL(b, wbl, []interface{}{refMarkers})
}
populateTestWL(b, wal, []interface{}{refSamples})
populateTestWL(b, wbl, []interface{}{refSamples})
} }
} }
@ -321,13 +405,19 @@ func BenchmarkLoadWAL(b *testing.B) {
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
opts := DefaultHeadOptions() opts := DefaultHeadOptions()
opts.ChunkRange = 1000 opts.ChunkRange = 1000
opts.ChunkDirRoot = w.Dir() opts.ChunkDirRoot = dir
h, err := NewHead(nil, nil, w, nil, opts, nil) if c.oooCapMax > 0 {
opts.OutOfOrderCapMax.Store(c.oooCapMax)
}
h, err := NewHead(nil, nil, wal, wbl, opts, nil)
require.NoError(b, err) require.NoError(b, err)
h.Init(0) h.Init(0)
} }
b.StopTimer() b.StopTimer()
w.Close() wal.Close()
if wbl != nil {
wbl.Close()
}
}) })
} }
} }
@ -564,7 +654,7 @@ func TestHead_ReadWAL(t *testing.T) {
require.NoError(t, head.Close()) require.NoError(t, head.Close())
}() }()
populateTestWAL(t, w, entries) populateTestWL(t, w, entries)
require.NoError(t, head.Init(math.MinInt64)) require.NoError(t, head.Init(math.MinInt64))
require.Equal(t, uint64(101), head.lastSeriesID.Load()) require.Equal(t, uint64(101), head.lastSeriesID.Load())
@ -717,6 +807,8 @@ func TestHead_Truncate(t *testing.T) {
h.initTime(0) h.initTime(0)
ctx := context.Background()
s1, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1", "b", "1")) s1, _, _ := h.getOrCreate(1, labels.FromStrings("a", "1", "b", "1"))
s2, _, _ := h.getOrCreate(2, labels.FromStrings("a", "2", "b", "1")) s2, _, _ := h.getOrCreate(2, labels.FromStrings("a", "2", "b", "1"))
s3, _, _ := h.getOrCreate(3, labels.FromStrings("a", "1", "b", "2")) s3, _, _ := h.getOrCreate(3, labels.FromStrings("a", "1", "b", "2"))
@ -785,7 +877,7 @@ func TestHead_Truncate(t *testing.T) {
ss = map[string]struct{}{} ss = map[string]struct{}{}
values[name] = ss values[name] = ss
} }
for _, value := range h.postings.LabelValues(name) { for _, value := range h.postings.LabelValues(ctx, name) {
ss[value] = struct{}{} ss[value] = struct{}{}
} }
} }
@ -1039,11 +1131,11 @@ func TestHeadDeleteSeriesWithoutSamples(t *testing.T) {
require.NoError(t, head.Close()) require.NoError(t, head.Close())
}() }()
populateTestWAL(t, w, entries) populateTestWL(t, w, entries)
require.NoError(t, head.Init(math.MinInt64)) require.NoError(t, head.Init(math.MinInt64))
require.NoError(t, head.Delete(0, 100, labels.MustNewMatcher(labels.MatchEqual, "a", "1"))) require.NoError(t, head.Delete(context.Background(), 0, 100, labels.MustNewMatcher(labels.MatchEqual, "a", "1")))
}) })
} }
} }
@ -1115,7 +1207,7 @@ func TestHeadDeleteSimple(t *testing.T) {
// Delete the ranges. // Delete the ranges.
for _, r := range c.dranges { for _, r := range c.dranges {
require.NoError(t, head.Delete(r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, lblDefault.Name, lblDefault.Value))) require.NoError(t, head.Delete(context.Background(), r.Mint, r.Maxt, labels.MustNewMatcher(labels.MatchEqual, lblDefault.Name, lblDefault.Value)))
} }
// Add more samples. // Add more samples.
@ -1142,7 +1234,7 @@ func TestHeadDeleteSimple(t *testing.T) {
for _, h := range []*Head{head, reloadedHead} { for _, h := range []*Head{head, reloadedHead} {
q, err := NewBlockQuerier(h, h.MinTime(), h.MaxTime()) q, err := NewBlockQuerier(h, h.MinTime(), h.MaxTime())
require.NoError(t, err) require.NoError(t, err)
actSeriesSet := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, lblDefault.Name, lblDefault.Value)) actSeriesSet := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, lblDefault.Name, lblDefault.Value))
require.NoError(t, q.Close()) require.NoError(t, q.Close())
expSeriesSet := newMockSeriesSet([]storage.Series{ expSeriesSet := newMockSeriesSet([]storage.Series{
storage.NewListSeries(lblsDefault, func() []chunks.Sample { storage.NewListSeries(lblsDefault, func() []chunks.Sample {
@ -1197,12 +1289,12 @@ func TestDeleteUntilCurMax(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
} }
require.NoError(t, app.Commit()) require.NoError(t, app.Commit())
require.NoError(t, hb.Delete(0, 10000, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))) require.NoError(t, hb.Delete(context.Background(), 0, 10000, labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
// Test the series returns no samples. The series is cleared only after compaction. // Test the series returns no samples. The series is cleared only after compaction.
q, err := NewBlockQuerier(hb, 0, 100000) q, err := NewBlockQuerier(hb, 0, 100000)
require.NoError(t, err) require.NoError(t, err)
res := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) res := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
require.True(t, res.Next(), "series is not present") require.True(t, res.Next(), "series is not present")
s := res.At() s := res.At()
it := s.Iterator(nil) it := s.Iterator(nil)
@ -1219,7 +1311,7 @@ func TestDeleteUntilCurMax(t *testing.T) {
require.NoError(t, app.Commit()) require.NoError(t, app.Commit())
q, err = NewBlockQuerier(hb, 0, 100000) q, err = NewBlockQuerier(hb, 0, 100000)
require.NoError(t, err) require.NoError(t, err)
res = q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) res = q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
require.True(t, res.Next(), "series don't exist") require.True(t, res.Next(), "series don't exist")
exps := res.At() exps := res.At()
it = exps.Iterator(nil) it = exps.Iterator(nil)
@ -1244,7 +1336,7 @@ func TestDeletedSamplesAndSeriesStillInWALAfterCheckpoint(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.NoError(t, app.Commit()) require.NoError(t, app.Commit())
} }
require.NoError(t, hb.Delete(0, int64(numSamples), labels.MustNewMatcher(labels.MatchEqual, "a", "b"))) require.NoError(t, hb.Delete(context.Background(), 0, int64(numSamples), labels.MustNewMatcher(labels.MatchEqual, "a", "b")))
require.NoError(t, hb.Truncate(1)) require.NoError(t, hb.Truncate(1))
require.NoError(t, hb.Close()) require.NoError(t, hb.Close())
@ -1376,7 +1468,7 @@ func TestDelete_e2e(t *testing.T) {
} }
for _, del := range dels { for _, del := range dels {
for _, r := range del.drange { for _, r := range del.drange {
require.NoError(t, hb.Delete(r.Mint, r.Maxt, del.ms...)) require.NoError(t, hb.Delete(context.Background(), r.Mint, r.Maxt, del.ms...))
} }
matched := labels.Slice{} matched := labels.Slice{}
for _, l := range lbls { for _, l := range lbls {
@ -1391,7 +1483,7 @@ func TestDelete_e2e(t *testing.T) {
q, err := NewBlockQuerier(hb, 0, 100000) q, err := NewBlockQuerier(hb, 0, 100000)
require.NoError(t, err) require.NoError(t, err)
defer q.Close() defer q.Close()
ss := q.Select(true, nil, del.ms...) ss := q.Select(context.Background(), true, nil, del.ms...)
// Build the mockSeriesSet. // Build the mockSeriesSet.
matchedSeries := make([]storage.Series, 0, len(matched)) matchedSeries := make([]storage.Series, 0, len(matched))
for _, m := range matched { for _, m := range matched {
@ -1840,7 +1932,7 @@ func TestUncommittedSamplesNotLostOnTruncate(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
defer q.Close() defer q.Close()
ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1")) ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1"))
require.Equal(t, true, ss.Next()) require.Equal(t, true, ss.Next())
for ss.Next() { for ss.Next() {
} }
@ -1869,7 +1961,7 @@ func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) {
q, err := NewBlockQuerier(h, 1500, 2500) q, err := NewBlockQuerier(h, 1500, 2500)
require.NoError(t, err) require.NoError(t, err)
ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1")) ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "1"))
require.Equal(t, false, ss.Next()) require.Equal(t, false, ss.Next())
require.Equal(t, 0, len(ss.Warnings())) require.Equal(t, 0, len(ss.Warnings()))
require.NoError(t, q.Close()) require.NoError(t, q.Close())
@ -2154,7 +2246,7 @@ func TestMemSeriesIsolation(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
defer querier.Close() defer querier.Close()
ss := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) ss := querier.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
_, seriesSet, ws, err := expandSeriesSet(ss) _, seriesSet, ws, err := expandSeriesSet(ss)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, 0, len(ws)) require.Equal(t, 0, len(ws))
@ -2461,7 +2553,7 @@ func TestOutOfOrderSamplesMetric(t *testing.T) {
require.NoError(t, app.Commit()) require.NoError(t, app.Commit())
require.Equal(t, int64(math.MinInt64), db.head.minValidTime.Load()) require.Equal(t, int64(math.MinInt64), db.head.minValidTime.Load())
require.NoError(t, db.Compact()) require.NoError(t, db.Compact(ctx))
require.Greater(t, db.head.minValidTime.Load(), int64(0)) require.Greater(t, db.head.minValidTime.Load(), int64(0))
app = db.Appender(ctx) app = db.Appender(ctx)
@ -2526,7 +2618,7 @@ func testHeadSeriesChunkRace(t *testing.T) {
h.gc() h.gc()
wg.Done() wg.Done()
}() }()
ss := q.Select(false, nil, matcher) ss := q.Select(context.Background(), false, nil, matcher)
for ss.Next() { for ss.Next() {
} }
require.NoError(t, ss.Err()) require.NoError(t, ss.Err())
@ -2552,9 +2644,10 @@ func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) {
} }
expectedLabelNames = []string{"a", "b", "c"} expectedLabelNames = []string{"a", "b", "c"}
expectedLabelValues = []string{"d", "e", "f"} expectedLabelValues = []string{"d", "e", "f"}
ctx = context.Background()
) )
app := head.Appender(context.Background()) app := head.Appender(ctx)
for i, name := range expectedLabelNames { for i, name := range expectedLabelNames {
_, err := app.Append(0, labels.FromStrings(name, expectedLabelValues[i]), seriesTimestamps[i], 0) _, err := app.Append(0, labels.FromStrings(name, expectedLabelValues[i]), seriesTimestamps[i], 0)
require.NoError(t, err) require.NoError(t, err)
@ -2579,12 +2672,12 @@ func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) {
for _, tt := range testCases { for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
headIdxReader := head.indexRange(tt.mint, tt.maxt) headIdxReader := head.indexRange(tt.mint, tt.maxt)
actualLabelNames, err := headIdxReader.LabelNames() actualLabelNames, err := headIdxReader.LabelNames(ctx)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, tt.expectedNames, actualLabelNames) require.Equal(t, tt.expectedNames, actualLabelNames)
if len(tt.expectedValues) > 0 { if len(tt.expectedValues) > 0 {
for i, name := range expectedLabelNames { for i, name := range expectedLabelNames {
actualLabelValue, err := headIdxReader.SortedLabelValues(name) actualLabelValue, err := headIdxReader.SortedLabelValues(ctx, name)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, []string{tt.expectedValues[i]}, actualLabelValue) require.Equal(t, []string{tt.expectedValues[i]}, actualLabelValue)
} }
@ -2597,6 +2690,8 @@ func TestHeadLabelValuesWithMatchers(t *testing.T) {
head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) head, _ := newTestHead(t, 1000, wlog.CompressionNone, false)
t.Cleanup(func() { require.NoError(t, head.Close()) }) t.Cleanup(func() { require.NoError(t, head.Close()) })
ctx := context.Background()
app := head.Appender(context.Background()) app := head.Appender(context.Background())
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
_, err := app.Append(0, labels.FromStrings( _, err := app.Append(0, labels.FromStrings(
@ -2640,11 +2735,11 @@ func TestHeadLabelValuesWithMatchers(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
headIdxReader := head.indexRange(0, 200) headIdxReader := head.indexRange(0, 200)
actualValues, err := headIdxReader.SortedLabelValues(tt.labelName, tt.matchers...) actualValues, err := headIdxReader.SortedLabelValues(ctx, tt.labelName, tt.matchers...)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, tt.expectedValues, actualValues) require.Equal(t, tt.expectedValues, actualValues)
actualValues, err = headIdxReader.LabelValues(tt.labelName, tt.matchers...) actualValues, err = headIdxReader.LabelValues(ctx, tt.labelName, tt.matchers...)
sort.Strings(actualValues) sort.Strings(actualValues)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, tt.expectedValues, actualValues) require.Equal(t, tt.expectedValues, actualValues)
@ -2713,7 +2808,7 @@ func TestHeadLabelNamesWithMatchers(t *testing.T) {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
headIdxReader := head.indexRange(0, 200) headIdxReader := head.indexRange(0, 200)
actualNames, err := headIdxReader.LabelNames(tt.matchers...) actualNames, err := headIdxReader.LabelNames(context.Background(), tt.matchers...)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, tt.expectedNames, actualNames) require.Equal(t, tt.expectedNames, actualNames)
}) })
@ -2726,8 +2821,10 @@ func TestHeadShardedPostings(t *testing.T) {
require.NoError(t, head.Close()) require.NoError(t, head.Close())
}() }()
ctx := context.Background()
// Append some series. // Append some series.
app := head.Appender(context.Background()) app := head.Appender(ctx)
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
_, err := app.Append(0, labels.FromStrings("unique", fmt.Sprintf("value%d", i), "const", "1"), 100, 0) _, err := app.Append(0, labels.FromStrings("unique", fmt.Sprintf("value%d", i), "const", "1"), 100, 0)
require.NoError(t, err) require.NoError(t, err)
@ -2738,7 +2835,7 @@ func TestHeadShardedPostings(t *testing.T) {
// List all postings for a given label value. This is what we expect to get // List all postings for a given label value. This is what we expect to get
// in output from all shards. // in output from all shards.
p, err := ir.Postings("const", "1") p, err := ir.Postings(ctx, "const", "1")
require.NoError(t, err) require.NoError(t, err)
var expected []storage.SeriesRef var expected []storage.SeriesRef
@ -2754,7 +2851,7 @@ func TestHeadShardedPostings(t *testing.T) {
actualPostings := make([]storage.SeriesRef, 0, len(expected)) actualPostings := make([]storage.SeriesRef, 0, len(expected))
for shardIndex := uint64(0); shardIndex < shardCount; shardIndex++ { for shardIndex := uint64(0); shardIndex < shardCount; shardIndex++ {
p, err = ir.Postings("const", "1") p, err = ir.Postings(ctx, "const", "1")
require.NoError(t, err) require.NoError(t, err)
p = ir.ShardedPostings(p, shardIndex, shardCount) p = ir.ShardedPostings(p, shardIndex, shardCount)
@ -2877,6 +2974,8 @@ func BenchmarkHeadLabelValuesWithMatchers(b *testing.B) {
head, _ := newTestHead(b, chunkRange, wlog.CompressionNone, false) head, _ := newTestHead(b, chunkRange, wlog.CompressionNone, false)
b.Cleanup(func() { require.NoError(b, head.Close()) }) b.Cleanup(func() { require.NoError(b, head.Close()) })
ctx := context.Background()
app := head.Appender(context.Background()) app := head.Appender(context.Background())
metricCount := 1000000 metricCount := 1000000
@ -2897,7 +2996,7 @@ func BenchmarkHeadLabelValuesWithMatchers(b *testing.B) {
b.ReportAllocs() b.ReportAllocs()
for benchIdx := 0; benchIdx < b.N; benchIdx++ { for benchIdx := 0; benchIdx < b.N; benchIdx++ {
actualValues, err := headIdxReader.LabelValues("b_tens", matchers...) actualValues, err := headIdxReader.LabelValues(ctx, "b_tens", matchers...)
require.NoError(b, err) require.NoError(b, err)
require.Equal(b, 9, len(actualValues)) require.Equal(b, 9, len(actualValues))
} }
@ -2974,6 +3073,7 @@ func TestIteratorSeekIntoBuffer(t *testing.T) {
func TestChunkNotFoundHeadGCRace(t *testing.T) { func TestChunkNotFoundHeadGCRace(t *testing.T) {
db := newTestDB(t) db := newTestDB(t)
db.DisableCompactions() db.DisableCompactions()
ctx := context.Background()
var ( var (
app = db.Appender(context.Background()) app = db.Appender(context.Background())
@ -2993,11 +3093,11 @@ func TestChunkNotFoundHeadGCRace(t *testing.T) {
require.NoError(t, app.Commit()) require.NoError(t, app.Commit())
// Get a querier before compaction (or when compaction is about to begin). // Get a querier before compaction (or when compaction is about to begin).
q, err := db.Querier(context.Background(), mint, maxt) q, err := db.Querier(mint, maxt)
require.NoError(t, err) require.NoError(t, err)
// Query the compacted range and get the first series before compaction. // Query the compacted range and get the first series before compaction.
ss := q.Select(true, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) ss := q.Select(context.Background(), true, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
require.True(t, ss.Next()) require.True(t, ss.Next())
s := ss.At() s := ss.At()
@ -3006,7 +3106,7 @@ func TestChunkNotFoundHeadGCRace(t *testing.T) {
go func() { go func() {
defer wg.Done() defer wg.Done()
// Compacting head while the querier spans the compaction time. // Compacting head while the querier spans the compaction time.
require.NoError(t, db.Compact()) require.NoError(t, db.Compact(ctx))
require.Greater(t, len(db.Blocks()), 0) require.Greater(t, len(db.Blocks()), 0)
}() }()
@ -3039,6 +3139,7 @@ func TestChunkNotFoundHeadGCRace(t *testing.T) {
func TestDataMissingOnQueryDuringCompaction(t *testing.T) { func TestDataMissingOnQueryDuringCompaction(t *testing.T) {
db := newTestDB(t) db := newTestDB(t)
db.DisableCompactions() db.DisableCompactions()
ctx := context.Background()
var ( var (
app = db.Appender(context.Background()) app = db.Appender(context.Background())
@ -3060,7 +3161,7 @@ func TestDataMissingOnQueryDuringCompaction(t *testing.T) {
require.NoError(t, app.Commit()) require.NoError(t, app.Commit())
// Get a querier before compaction (or when compaction is about to begin). // Get a querier before compaction (or when compaction is about to begin).
q, err := db.Querier(context.Background(), mint, maxt) q, err := db.Querier(mint, maxt)
require.NoError(t, err) require.NoError(t, err)
var wg sync.WaitGroup var wg sync.WaitGroup
@ -3068,7 +3169,7 @@ func TestDataMissingOnQueryDuringCompaction(t *testing.T) {
go func() { go func() {
defer wg.Done() defer wg.Done()
// Compacting head while the querier spans the compaction time. // Compacting head while the querier spans the compaction time.
require.NoError(t, db.Compact()) require.NoError(t, db.Compact(ctx))
require.Greater(t, len(db.Blocks()), 0) require.Greater(t, len(db.Blocks()), 0)
}() }()
@ -3174,11 +3275,11 @@ func TestWaitForPendingReadersInTimeRange(t *testing.T) {
require.True(t, waitOver.Load()) require.True(t, waitOver.Load())
} }
q, err := db.Querier(context.Background(), c.mint, c.maxt) q, err := db.Querier(c.mint, c.maxt)
require.NoError(t, err) require.NoError(t, err)
checkWaiting(q) checkWaiting(q)
cq, err := db.ChunkQuerier(context.Background(), c.mint, c.maxt) cq, err := db.ChunkQuerier(c.mint, c.maxt)
require.NoError(t, err) require.NoError(t, err)
checkWaiting(cq) checkWaiting(cq)
}) })
@ -3258,7 +3359,7 @@ func TestAppendHistogram(t *testing.T) {
require.NoError(t, q.Close()) require.NoError(t, q.Close())
}) })
ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
require.True(t, ss.Next()) require.True(t, ss.Next())
s := ss.At() s := ss.At()
@ -3911,7 +4012,7 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) {
require.NoError(t, q.Close()) require.NoError(t, q.Close())
}) })
ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
require.True(t, ss.Next()) require.True(t, ss.Next())
s := ss.At() s := ss.At()
@ -4303,7 +4404,7 @@ func TestAppendingDifferentEncodingToSameSeries(t *testing.T) {
} }
// Query back and expect same order of samples. // Query back and expect same order of samples.
q, err := db.Querier(context.Background(), math.MinInt64, math.MaxInt64) q, err := db.Querier(math.MinInt64, math.MaxInt64)
require.NoError(t, err) require.NoError(t, err)
series := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b")) series := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
@ -5309,6 +5410,7 @@ func BenchmarkCuttingHeadHistogramChunks(b *testing.B) {
} }
func TestCuttingNewHeadChunks(t *testing.T) { func TestCuttingNewHeadChunks(t *testing.T) {
ctx := context.Background()
testCases := map[string]struct { testCases := map[string]struct {
numTotalSamples int numTotalSamples int
timestampJitter bool timestampJitter bool
@ -5442,7 +5544,7 @@ func TestCuttingNewHeadChunks(t *testing.T) {
chkReader, err := h.Chunks() chkReader, err := h.Chunks()
require.NoError(t, err) require.NoError(t, err)
p, err := idxReader.Postings("foo", "bar") p, err := idxReader.Postings(ctx, "foo", "bar")
require.NoError(t, err) require.NoError(t, err)
var lblBuilder labels.ScratchBuilder var lblBuilder labels.ScratchBuilder
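The reworked benchmark replays a WBL (the write-behind log for out-of-order samples) alongside the WAL. A sketch of the setup it depends on, following the constructor signatures in the hunks above; the directory layout is illustrative:

package example

import (
	"path/filepath"

	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/wlog"
)

// openHeadWithWBL is a sketch: it opens a Head backed by both logs and
// replays them.
func openHeadWithWBL(dir string) (*tsdb.Head, error) {
	wal, err := wlog.New(nil, nil, filepath.Join(dir, "wal"), wlog.CompressionNone)
	if err != nil {
		return nil, err
	}
	wbl, err := wlog.New(nil, nil, filepath.Join(dir, "wbl"), wlog.CompressionNone)
	if err != nil {
		return nil, err
	}
	opts := tsdb.DefaultHeadOptions()
	opts.ChunkRange = 1000
	opts.ChunkDirRoot = dir
	opts.OutOfOrderCapMax.Store(tsdb.DefaultOutOfOrderCapMax)

	h, err := tsdb.NewHead(nil, nil, wal, wbl, opts, nil)
	if err != nil {
		return nil, err
	}
	return h, h.Init(0) // replays the WAL, then the WBL for OOO samples
}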


@ -591,9 +591,6 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
if s.T <= ms.mmMaxTime { if s.T <= ms.mmMaxTime {
continue continue
} }
if s.T <= ms.mmMaxTime {
continue
}
if _, chunkCreated := ms.append(s.T, s.V, 0, appendChunkOpts); chunkCreated { if _, chunkCreated := ms.append(s.T, s.V, 0, appendChunkOpts); chunkCreated {
h.metrics.chunksCreated.Inc() h.metrics.chunksCreated.Inc()
h.metrics.chunks.Inc() h.metrics.chunks.Inc()
@ -754,7 +751,9 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
m = len(samples) m = len(samples)
} }
for i := 0; i < concurrency; i++ { for i := 0; i < concurrency; i++ {
shards[i] = processors[i].reuseBuf() if shards[i] == nil {
shards[i] = processors[i].reuseBuf()
}
} }
for _, sam := range samples[:m] { for _, sam := range samples[:m] {
if r, ok := multiRef[sam.Ref]; ok { if r, ok := multiRef[sam.Ref]; ok {
@ -764,7 +763,10 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
shards[mod] = append(shards[mod], sam) shards[mod] = append(shards[mod], sam)
} }
for i := 0; i < concurrency; i++ { for i := 0; i < concurrency; i++ {
processors[i].input <- shards[i] if len(shards[i]) > 0 {
processors[i].input <- wblSubsetProcessorInputItem{samples: shards[i]}
shards[i] = nil
}
} }
samples = samples[m:] samples = samples[m:]
} }
@ -790,23 +792,7 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.
continue continue
} }
idx := uint64(ms.ref) % uint64(concurrency) idx := uint64(ms.ref) % uint64(concurrency)
// It is possible that some old sample is being processed in processWALSamples that processors[idx].input <- wblSubsetProcessorInputItem{mmappedSeries: ms}
// could cause race below. So we wait for the goroutine to empty input the buffer and finish
// processing all old samples after emptying the buffer.
processors[idx].waitUntilIdle()
// Lock the subset so we can modify the series object
processors[idx].mx.Lock()
// All samples till now have been m-mapped. Hence clear out the headChunk.
// In case some samples slipped through and went into m-map chunks because of changed
// chunk size parameters, we are not taking care of that here.
// TODO(codesome): see if there is a way to avoid duplicate m-map chunks if
// the size of ooo chunk was reduced between restart.
if ms.ooo != nil {
ms.ooo.oooHeadChunk = nil
}
processors[idx].mx.Unlock()
} }
default: default:
panic(fmt.Errorf("unexpected decodedCh type: %T", d)) panic(fmt.Errorf("unexpected decodedCh type: %T", d))
@ -858,14 +844,18 @@ func isErrLoadOOOWal(err error) bool {
} }
type wblSubsetProcessor struct { type wblSubsetProcessor struct {
mx sync.Mutex // Take this lock while modifying series in the subset. input chan wblSubsetProcessorInputItem
input chan []record.RefSample
output chan []record.RefSample output chan []record.RefSample
} }
type wblSubsetProcessorInputItem struct {
mmappedSeries *memSeries
samples []record.RefSample
}
func (wp *wblSubsetProcessor) setup() { func (wp *wblSubsetProcessor) setup() {
wp.output = make(chan []record.RefSample, 300) wp.output = make(chan []record.RefSample, 300)
wp.input = make(chan []record.RefSample, 300) wp.input = make(chan wblSubsetProcessorInputItem, 300)
} }
func (wp *wblSubsetProcessor) closeAndDrain() { func (wp *wblSubsetProcessor) closeAndDrain() {
@ -886,16 +876,23 @@ func (wp *wblSubsetProcessor) reuseBuf() []record.RefSample {
// processWBLSamples adds the samples it receives to the head and passes // processWBLSamples adds the samples it receives to the head and passes
// the buffer received to an output channel for reuse. // the buffer received to an output channel for reuse.
// Samples before the minValidTime timestamp are discarded.
func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs uint64) { func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs uint64) {
defer close(wp.output) defer close(wp.output)
oooCapMax := h.opts.OutOfOrderCapMax.Load() oooCapMax := h.opts.OutOfOrderCapMax.Load()
// We don't check for minValidTime for ooo samples. // We don't check for minValidTime for ooo samples.
mint, maxt := int64(math.MaxInt64), int64(math.MinInt64) mint, maxt := int64(math.MaxInt64), int64(math.MinInt64)
for samples := range wp.input { for in := range wp.input {
wp.mx.Lock() if in.mmappedSeries != nil && in.mmappedSeries.ooo != nil {
for _, s := range samples { // All samples till now have been m-mapped. Hence clear out the headChunk.
// In case some samples slipped through and went into m-map chunks because of changed
// chunk size parameters, we are not taking care of that here.
// TODO(codesome): see if there is a way to avoid duplicate m-map chunks if
// the size of ooo chunk was reduced between restart.
in.mmappedSeries.ooo.oooHeadChunk = nil
continue
}
for _, s := range in.samples {
ms := h.series.getByID(s.Ref) ms := h.series.getByID(s.Ref)
if ms == nil { if ms == nil {
unknownRefs++ unknownRefs++
@ -915,8 +912,10 @@ func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs uint64) {
} }
} }
} }
wp.mx.Unlock() select {
case wp.output <- in.samples:
default:
}
} }
h.updateMinOOOMaxOOOTime(mint, maxt) h.updateMinOOOMaxOOOTime(mint, maxt)
@ -924,21 +923,6 @@ func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs uint64) {
return unknownRefs return unknownRefs
} }
func (wp *wblSubsetProcessor) waitUntilIdle() {
select {
case <-wp.output: // Allow output side to drain to avoid deadlock.
default:
}
wp.input <- []record.RefSample{}
for len(wp.input) != 0 {
time.Sleep(10 * time.Microsecond)
select {
case <-wp.output: // Allow output side to drain to avoid deadlock.
default:
}
}
}
const ( const (
chunkSnapshotRecordTypeSeries uint8 = 1 chunkSnapshotRecordTypeSeries uint8 = 1
chunkSnapshotRecordTypeTombstones uint8 = 2 chunkSnapshotRecordTypeTombstones uint8 = 2
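The wblSubsetProcessor refactor above removes the per-subset mutex and the waitUntilIdle spinning: the "clear the OOO head chunk" operation now travels through the same input channel as the sample batches, so each worker applies it in order without locking. A minimal self-contained sketch of the pattern; the types and payloads are illustrative, not the Head's real ones:

package example

// inputItem is a tagged union: either a control item targeting one series
// or a batch of samples. Channel ordering guarantees the control item is
// handled after every sample batch sent before it.
type inputItem struct {
	resetSeries int   // series to reset; used when samples is nil
	samples     []int // sample batch; illustrative payload
}

// worker is a sketch: because only this goroutine touches the state it
// owns, no mutex is needed for either kind of item.
func worker(input <-chan inputItem, apply func(int), reset func(int)) {
	for in := range input {
		if in.samples == nil {
			reset(in.resetSeries)
			continue
		}
		for _, s := range in.samples {
			apply(s)
		}
	}
}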


@ -924,7 +924,7 @@ func (w *Writer) writePostingsToTmpFiles() error {
// Symbol numbers are in order, so the strings will also be in order. // Symbol numbers are in order, so the strings will also be in order.
slices.Sort(values) slices.Sort(values)
for _, v := range values { for _, v := range values {
value, err := w.symbols.Lookup(v) value, err := w.symbols.Lookup(w.ctx, v)
if err != nil { if err != nil {
return err return err
} }
@ -1314,7 +1314,7 @@ func NewSymbols(bs ByteSlice, version, off int) (*Symbols, error) {
return s, nil return s, nil
} }
func (s Symbols) Lookup(o uint32) (string, error) { func (s Symbols) Lookup(ctx context.Context, o uint32) (string, error) {
d := encoding.Decbuf{ d := encoding.Decbuf{
B: s.bs.Range(0, s.bs.Len()), B: s.bs.Range(0, s.bs.Len()),
} }
@ -1326,6 +1326,9 @@ func (s Symbols) Lookup(o uint32) (string, error) {
d.Skip(s.offsets[int(o/symbolFactor)]) d.Skip(s.offsets[int(o/symbolFactor)])
// Walk until we find the one we want. // Walk until we find the one we want.
for i := o - (o / symbolFactor * symbolFactor); i > 0; i-- { for i := o - (o / symbolFactor * symbolFactor); i > 0; i-- {
if ctx.Err() != nil {
return "", ctx.Err()
}
d.UvarintBytes() d.UvarintBytes()
} }
} else { } else {
@ -1453,11 +1456,11 @@ func (r *Reader) Close() error {
return r.c.Close() return r.c.Close()
} }
func (r *Reader) lookupSymbol(o uint32) (string, error) { func (r *Reader) lookupSymbol(ctx context.Context, o uint32) (string, error) {
if s, ok := r.nameSymbols[o]; ok { if s, ok := r.nameSymbols[o]; ok {
return s, nil return s, nil
} }
return r.symbols.Lookup(o) return r.symbols.Lookup(ctx, o)
} }
// Symbols returns an iterator over the symbols that exist within the index. // Symbols returns an iterator over the symbols that exist within the index.
@ -1473,8 +1476,8 @@ func (r *Reader) SymbolTableSize() uint64 {
// SortedLabelValues returns value tuples that exist for the given label name. // SortedLabelValues returns value tuples that exist for the given label name.
// It is not safe to use the return value beyond the lifetime of the byte slice // It is not safe to use the return value beyond the lifetime of the byte slice
// passed into the Reader. // passed into the Reader.
func (r *Reader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { func (r *Reader) SortedLabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
values, err := r.LabelValues(name, matchers...) values, err := r.LabelValues(ctx, name, matchers...)
if err == nil && r.version == FormatV1 { if err == nil && r.version == FormatV1 {
slices.Sort(values) slices.Sort(values)
} }
@ -1485,7 +1488,7 @@ func (r *Reader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]
// It is not safe to use the return value beyond the lifetime of the byte slice // It is not safe to use the return value beyond the lifetime of the byte slice
// passed into the Reader. // passed into the Reader.
// TODO(replay): Support filtering by matchers // TODO(replay): Support filtering by matchers
func (r *Reader) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { func (r *Reader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
if len(matchers) > 0 { if len(matchers) > 0 {
return nil, errors.Errorf("matchers parameter is not implemented: %+v", matchers) return nil, errors.Errorf("matchers parameter is not implemented: %+v", matchers)
} }
@ -1516,7 +1519,7 @@ func (r *Reader) LabelValues(name string, matchers ...*labels.Matcher) ([]string
lastVal := e[len(e)-1].value lastVal := e[len(e)-1].value
skip := 0 skip := 0
for d.Err() == nil { for d.Err() == nil && ctx.Err() == nil {
if skip == 0 { if skip == 0 {
// These are always the same number of bytes, // These are always the same number of bytes,
// and it's faster to skip than parse. // and it's faster to skip than parse.
@ -1537,15 +1540,20 @@ func (r *Reader) LabelValues(name string, matchers ...*labels.Matcher) ([]string
if d.Err() != nil { if d.Err() != nil {
return nil, errors.Wrap(d.Err(), "get postings offset entry") return nil, errors.Wrap(d.Err(), "get postings offset entry")
} }
return values, nil
return values, ctx.Err()
} }
// LabelNamesFor returns all the label names for the series referred to by IDs. // LabelNamesFor returns all the label names for the series referred to by IDs.
// The names returned are sorted. // The names returned are sorted.
func (r *Reader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) { func (r *Reader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) {
// Gather offsetsMap the name offsetsMap in the symbol table first // Gather offsetsMap the name offsetsMap in the symbol table first
offsetsMap := make(map[uint32]struct{}) offsetsMap := make(map[uint32]struct{})
for _, id := range ids { for _, id := range ids {
if ctx.Err() != nil {
return nil, ctx.Err()
}
offset := id offset := id
// In version 2 series IDs are no longer exact references but series are 16-byte padded // In version 2 series IDs are no longer exact references but series are 16-byte padded
// and the ID is the multiple of 16 of the actual position. // and the ID is the multiple of 16 of the actual position.
@ -1571,7 +1579,7 @@ func (r *Reader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) {
// Lookup the unique symbols. // Lookup the unique symbols.
names := make([]string, 0, len(offsetsMap)) names := make([]string, 0, len(offsetsMap))
for off := range offsetsMap { for off := range offsetsMap {
name, err := r.lookupSymbol(off) name, err := r.lookupSymbol(ctx, off)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "lookup symbol in LabelNamesFor") return nil, errors.Wrap(err, "lookup symbol in LabelNamesFor")
} }
@ -1584,7 +1592,7 @@ func (r *Reader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) {
} }
// LabelValueFor returns label value for the given label name in the series referred to by ID. // LabelValueFor returns label value for the given label name in the series referred to by ID.
func (r *Reader) LabelValueFor(id storage.SeriesRef, label string) (string, error) { func (r *Reader) LabelValueFor(ctx context.Context, id storage.SeriesRef, label string) (string, error) {
offset := id offset := id
// In version 2 series IDs are no longer exact references but series are 16-byte padded // In version 2 series IDs are no longer exact references but series are 16-byte padded
// and the ID is the multiple of 16 of the actual position. // and the ID is the multiple of 16 of the actual position.
@ -1597,7 +1605,7 @@ func (r *Reader) LabelValueFor(id storage.SeriesRef, label string) (string, erro
return "", errors.Wrap(d.Err(), "label values for") return "", errors.Wrap(d.Err(), "label values for")
} }
value, err := r.dec.LabelValueFor(buf, label) value, err := r.dec.LabelValueFor(ctx, buf, label)
if err != nil { if err != nil {
return "", storage.ErrNotFound return "", storage.ErrNotFound
} }
@ -1624,7 +1632,7 @@ func (r *Reader) Series(id storage.SeriesRef, builder *labels.ScratchBuilder, ch
return errors.Wrap(r.dec.Series(d.Get(), builder, chks), "read series") return errors.Wrap(r.dec.Series(d.Get(), builder, chks), "read series")
} }
func (r *Reader) Postings(name string, values ...string) (Postings, error) { func (r *Reader) Postings(ctx context.Context, name string, values ...string) (Postings, error) {
if r.version == FormatV1 { if r.version == FormatV1 {
e, ok := r.postingsV1[name] e, ok := r.postingsV1[name]
if !ok { if !ok {
@ -1644,7 +1652,7 @@ func (r *Reader) Postings(name string, values ...string) (Postings, error) {
} }
res = append(res, p) res = append(res, p)
} }
return Merge(res...), nil return Merge(ctx, res...), nil
} }
e, ok := r.postings[name] e, ok := r.postings[name]
@ -1683,7 +1691,7 @@ func (r *Reader) Postings(name string, values ...string) (Postings, error) {
// Iterate on the offset table. // Iterate on the offset table.
var postingsOff uint64 // The offset into the postings table. var postingsOff uint64 // The offset into the postings table.
for d.Err() == nil { for d.Err() == nil && ctx.Err() == nil {
if skip == 0 { if skip == 0 {
// These are always the same number of bytes, // These are always the same number of bytes,
// and it's faster to skip than parse. // and it's faster to skip than parse.
@ -1720,9 +1728,12 @@ func (r *Reader) Postings(name string, values ...string) (Postings, error) {
if d.Err() != nil { if d.Err() != nil {
return nil, errors.Wrap(d.Err(), "get postings offset entry") return nil, errors.Wrap(d.Err(), "get postings offset entry")
} }
if ctx.Err() != nil {
return nil, errors.Wrap(ctx.Err(), "get postings offset entry")
}
} }
return Merge(res...), nil return Merge(ctx, res...), nil
} }
// SortedPostings returns the given postings list reordered so that the backing series // SortedPostings returns the given postings list reordered so that the backing series
@ -1789,7 +1800,7 @@ func (r *Reader) Size() int64 {
// LabelNames returns all the unique label names present in the index. // LabelNames returns all the unique label names present in the index.
// TODO(twilkie) implement support for matchers // TODO(twilkie) implement support for matchers
func (r *Reader) LabelNames(matchers ...*labels.Matcher) ([]string, error) { func (r *Reader) LabelNames(_ context.Context, matchers ...*labels.Matcher) ([]string, error) {
if len(matchers) > 0 { if len(matchers) > 0 {
return nil, errors.Errorf("matchers parameter is not implemented: %+v", matchers) return nil, errors.Errorf("matchers parameter is not implemented: %+v", matchers)
} }
@ -1833,7 +1844,7 @@ func (s stringListIter) Err() error { return nil }
// It currently does not contain decoding methods for all entry types but can be extended // It currently does not contain decoding methods for all entry types but can be extended
// by them if there's demand. // by them if there's demand.
type Decoder struct { type Decoder struct {
LookupSymbol func(uint32) (string, error) LookupSymbol func(context.Context, uint32) (string, error)
} }
// Postings returns a postings list for b and its number of elements. // Postings returns a postings list for b and its number of elements.
@ -1870,7 +1881,7 @@ func (dec *Decoder) LabelNamesOffsetsFor(b []byte) ([]uint32, error) {
} }
// LabelValueFor decodes a label for a given series. // LabelValueFor decodes a label for a given series.
func (dec *Decoder) LabelValueFor(b []byte, label string) (string, error) { func (dec *Decoder) LabelValueFor(ctx context.Context, b []byte, label string) (string, error) {
d := encoding.Decbuf{B: b} d := encoding.Decbuf{B: b}
k := d.Uvarint() k := d.Uvarint()
@ -1882,13 +1893,13 @@ func (dec *Decoder) LabelValueFor(b []byte, label string) (string, error) {
return "", errors.Wrap(d.Err(), "read series label offsets") return "", errors.Wrap(d.Err(), "read series label offsets")
} }
ln, err := dec.LookupSymbol(lno) ln, err := dec.LookupSymbol(ctx, lno)
if err != nil { if err != nil {
return "", errors.Wrap(err, "lookup label name") return "", errors.Wrap(err, "lookup label name")
} }
if ln == label { if ln == label {
lv, err := dec.LookupSymbol(lvo) lv, err := dec.LookupSymbol(ctx, lvo)
if err != nil { if err != nil {
return "", errors.Wrap(err, "lookup label value") return "", errors.Wrap(err, "lookup label value")
} }
@ -1920,11 +1931,11 @@ func (dec *Decoder) Series(b []byte, builder *labels.ScratchBuilder, chks *[]chu
return errors.Wrap(d.Err(), "read series label offsets") return errors.Wrap(d.Err(), "read series label offsets")
} }
ln, err := dec.LookupSymbol(lno) ln, err := dec.LookupSymbol(context.TODO(), lno)
if err != nil { if err != nil {
return errors.Wrap(err, "lookup label name") return errors.Wrap(err, "lookup label name")
} }
lv, err := dec.LookupSymbol(lvo) lv, err := dec.LookupSymbol(context.TODO(), lvo)
if err != nil { if err != nil {
return errors.Wrap(err, "lookup label value") return errors.Wrap(err, "lookup label value")
} }
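
Decoder.LookupSymbol is a plain struct field, so callers wire in whatever context-aware resolver they have; note that Series itself still passes context.TODO(), so cancellation does not yet reach per-series symbol lookups. A sketch of wiring the field (the toy table below is illustrative; a real resolver reads the index symbol table):

package main

import (
	"context"
	"fmt"

	"github.com/prometheus/prometheus/tsdb/index"
)

func main() {
	table := map[uint32]string{0: "__name__", 1: "up"}
	dec := index.Decoder{
		LookupSymbol: func(ctx context.Context, o uint32) (string, error) {
			if err := ctx.Err(); err != nil {
				return "", err // honour cancellation before doing any work
			}
			s, ok := table[o]
			if !ok {
				return "", fmt.Errorf("unknown symbol %d", o)
			}
			return s, nil
		},
	}
	s, err := dec.LookupSymbol(context.Background(), 1)
	fmt.Println(s, err) // up <nil>
}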

@ -93,7 +93,7 @@ func (m mockIndex) Close() error {
return nil return nil
} }
func (m mockIndex) LabelValues(name string) ([]string, error) { func (m mockIndex) LabelValues(_ context.Context, name string) ([]string, error) {
values := []string{} values := []string{}
for l := range m.postings { for l := range m.postings {
if l.Name == name { if l.Name == name {
@ -103,13 +103,13 @@ func (m mockIndex) LabelValues(name string) ([]string, error) {
return values, nil return values, nil
} }
func (m mockIndex) Postings(name string, values ...string) (Postings, error) { func (m mockIndex) Postings(ctx context.Context, name string, values ...string) (Postings, error) {
p := []Postings{} p := []Postings{}
for _, value := range values { for _, value := range values {
l := labels.Label{Name: name, Value: value} l := labels.Label{Name: name, Value: value}
p = append(p, m.SortedPostings(NewListPostings(m.postings[l]))) p = append(p, m.SortedPostings(NewListPostings(m.postings[l])))
} }
return Merge(p...), nil return Merge(ctx, p...), nil
} }
func (m mockIndex) SortedPostings(p Postings) Postings { func (m mockIndex) SortedPostings(p Postings) Postings {
@ -162,6 +162,7 @@ func TestIndexRW_Create_Open(t *testing.T) {
func TestIndexRW_Postings(t *testing.T) { func TestIndexRW_Postings(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
ctx := context.Background()
fn := filepath.Join(dir, indexFilename) fn := filepath.Join(dir, indexFilename)
@ -194,7 +195,7 @@ func TestIndexRW_Postings(t *testing.T) {
ir, err := NewFileReader(fn) ir, err := NewFileReader(fn)
require.NoError(t, err) require.NoError(t, err)
p, err := ir.Postings("a", "1") p, err := ir.Postings(ctx, "a", "1")
require.NoError(t, err) require.NoError(t, err)
var c []chunks.Meta var c []chunks.Meta
@ -228,7 +229,7 @@ func TestIndexRW_Postings(t *testing.T) {
d := encoding.NewDecbufAt(ir.b, int(off), castagnoliTable) d := encoding.NewDecbufAt(ir.b, int(off), castagnoliTable)
require.Equal(t, 1, d.Be32int(), "Unexpected number of label indices table names") require.Equal(t, 1, d.Be32int(), "Unexpected number of label indices table names")
for i := d.Be32(); i > 0 && d.Err() == nil; i-- { for i := d.Be32(); i > 0 && d.Err() == nil; i-- {
v, err := ir.lookupSymbol(d.Be32()) v, err := ir.lookupSymbol(ctx, d.Be32())
require.NoError(t, err) require.NoError(t, err)
labelIndices[lbl] = append(labelIndices[lbl], v) labelIndices[lbl] = append(labelIndices[lbl], v)
} }
@ -253,7 +254,7 @@ func TestIndexRW_Postings(t *testing.T) {
// List all postings for a given label value. This is what we expect to get // List all postings for a given label value. This is what we expect to get
// in output from all shards. // in output from all shards.
p, err = ir.Postings("a", "1") p, err = ir.Postings(ctx, "a", "1")
require.NoError(t, err) require.NoError(t, err)
var expected []storage.SeriesRef var expected []storage.SeriesRef
@ -269,7 +270,7 @@ func TestIndexRW_Postings(t *testing.T) {
actualPostings := make([]storage.SeriesRef, 0, len(expected)) actualPostings := make([]storage.SeriesRef, 0, len(expected))
for shardIndex := uint64(0); shardIndex < shardCount; shardIndex++ { for shardIndex := uint64(0); shardIndex < shardCount; shardIndex++ {
p, err = ir.Postings("a", "1") p, err = ir.Postings(ctx, "a", "1")
require.NoError(t, err) require.NoError(t, err)
p = ir.ShardedPostings(p, shardIndex, shardCount) p = ir.ShardedPostings(p, shardIndex, shardCount)
@ -302,6 +303,7 @@ func TestIndexRW_Postings(t *testing.T) {
func TestPostingsMany(t *testing.T) { func TestPostingsMany(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
ctx := context.Background()
fn := filepath.Join(dir, indexFilename) fn := filepath.Join(dir, indexFilename)
@ -370,7 +372,7 @@ func TestPostingsMany(t *testing.T) {
var builder labels.ScratchBuilder var builder labels.ScratchBuilder
for _, c := range cases { for _, c := range cases {
it, err := ir.Postings("i", c.in...) it, err := ir.Postings(ctx, "i", c.in...)
require.NoError(t, err) require.NoError(t, err)
got := []string{} got := []string{}
@ -392,6 +394,7 @@ func TestPostingsMany(t *testing.T) {
func TestPersistence_index_e2e(t *testing.T) { func TestPersistence_index_e2e(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
ctx := context.Background()
lbls, err := labels.ReadLabels(filepath.Join("..", "testdata", "20kseries.json"), 20000) lbls, err := labels.ReadLabels(filepath.Join("..", "testdata", "20kseries.json"), 20000)
require.NoError(t, err) require.NoError(t, err)
@ -470,10 +473,10 @@ func TestPersistence_index_e2e(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
for p := range mi.postings { for p := range mi.postings {
gotp, err := ir.Postings(p.Name, p.Value) gotp, err := ir.Postings(ctx, p.Name, p.Value)
require.NoError(t, err) require.NoError(t, err)
expp, err := mi.Postings(p.Name, p.Value) expp, err := mi.Postings(ctx, p.Name, p.Value)
require.NoError(t, err) require.NoError(t, err)
var chks, expchks []chunks.Meta var chks, expchks []chunks.Meta
@ -503,7 +506,7 @@ func TestPersistence_index_e2e(t *testing.T) {
for k, v := range labelPairs { for k, v := range labelPairs {
sort.Strings(v) sort.Strings(v)
res, err := ir.SortedLabelValues(k) res, err := ir.SortedLabelValues(ctx, k)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, len(v), len(res)) require.Equal(t, len(v), len(res))
@ -573,6 +576,7 @@ func TestNewFileReaderErrorNoOpenFiles(t *testing.T) {
} }
func TestSymbols(t *testing.T) { func TestSymbols(t *testing.T) {
ctx := context.Background()
buf := encoding.Encbuf{} buf := encoding.Encbuf{}
// Add prefix to the buffer to simulate symbols as part of larger buffer. // Add prefix to the buffer to simulate symbols as part of larger buffer.
@ -595,11 +599,11 @@ func TestSymbols(t *testing.T) {
require.Equal(t, 32, s.Size()) require.Equal(t, 32, s.Size())
for i := 99; i >= 0; i-- { for i := 99; i >= 0; i-- {
s, err := s.Lookup(uint32(i)) s, err := s.Lookup(ctx, uint32(i))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, string(rune(i)), s) require.Equal(t, string(rune(i)), s)
} }
_, err = s.Lookup(100) _, err = s.Lookup(ctx, 100)
require.Error(t, err) require.Error(t, err)
for i := 99; i >= 0; i-- { for i := 99; i >= 0; i-- {
@ -631,10 +635,12 @@ func BenchmarkReader_ShardedPostings(b *testing.B) {
require.NoError(b, os.RemoveAll(dir)) require.NoError(b, os.RemoveAll(dir))
}() }()
ctx := context.Background()
// Generate an index. // Generate an index.
fn := filepath.Join(dir, indexFilename) fn := filepath.Join(dir, indexFilename)
iw, err := NewWriter(context.Background(), fn) iw, err := NewWriter(ctx, fn)
require.NoError(b, err) require.NoError(b, err)
for i := 1; i <= numSeries; i++ { for i := 1; i <= numSeries; i++ {
@ -664,7 +670,7 @@ func BenchmarkReader_ShardedPostings(b *testing.B) {
b.ResetTimer() b.ResetTimer()
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
allPostings, err := ir.Postings("const", fmt.Sprintf("%10d", 1)) allPostings, err := ir.Postings(ctx, "const", fmt.Sprintf("%10d", 1))
require.NoError(b, err) require.NoError(b, err)
ir.ShardedPostings(allPostings, uint64(n%numShards), numShards) ir.ShardedPostings(allPostings, uint64(n%numShards), numShards)

@ -15,6 +15,7 @@ package index
import ( import (
"container/heap" "container/heap"
"context"
"encoding/binary" "encoding/binary"
"runtime" "runtime"
"sort" "sort"
@ -135,7 +136,7 @@ func (p *MemPostings) LabelNames() []string {
} }
// LabelValues returns label values for the given name. // LabelValues returns label values for the given name.
func (p *MemPostings) LabelValues(name string) []string { func (p *MemPostings) LabelValues(_ context.Context, name string) []string {
p.mtx.RLock() p.mtx.RLock()
defer p.mtx.RUnlock() defer p.mtx.RUnlock()
@ -519,7 +520,7 @@ func (it *intersectPostings) Err() error {
} }
// Merge returns a new iterator over the union of the input iterators. // Merge returns a new iterator over the union of the input iterators.
func Merge(its ...Postings) Postings { func Merge(ctx context.Context, its ...Postings) Postings {
if len(its) == 0 { if len(its) == 0 {
return EmptyPostings() return EmptyPostings()
} }
@ -527,7 +528,7 @@ func Merge(its ...Postings) Postings {
return its[0] return its[0]
} }
p, ok := newMergedPostings(its) p, ok := newMergedPostings(ctx, its)
if !ok { if !ok {
return EmptyPostings() return EmptyPostings()
} }
@ -559,12 +560,14 @@ type mergedPostings struct {
err error err error
} }
func newMergedPostings(p []Postings) (m *mergedPostings, nonEmpty bool) { func newMergedPostings(ctx context.Context, p []Postings) (m *mergedPostings, nonEmpty bool) {
ph := make(postingsHeap, 0, len(p)) ph := make(postingsHeap, 0, len(p))
for _, it := range p { for _, it := range p {
// NOTE: mergedPostings struct requires the user to issue an initial Next. // NOTE: mergedPostings struct requires the user to issue an initial Next.
switch { switch {
case ctx.Err() != nil:
return &mergedPostings{err: ctx.Err()}, true
case it.Next(): case it.Next():
ph = append(ph, it) ph = append(ph, it)
case it.Err() != nil: case it.Err() != nil:
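
Merge now takes the context up front, and newMergedPostings bails out with ctx.Err() before draining any iterator, so a cancelled query surfaces as an error on the merged postings rather than a full union computation. A small usage sketch against the updated API (expected outputs noted in comments):

package main

import (
	"context"
	"fmt"

	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/index"
)

func main() {
	a := index.NewListPostings([]storage.SeriesRef{1, 3, 5})
	b := index.NewListPostings([]storage.SeriesRef{2, 3, 6})

	// With a live context, Merge behaves as before: a sorted,
	// de-duplicated union of the inputs.
	refs, err := index.ExpandPostings(index.Merge(context.Background(), a, b))
	fmt.Println(refs, err) // should print [1 2 3 5 6] <nil>

	// With an already-cancelled context, the merged iterator carries
	// the context error instead of iterating.
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	p := index.Merge(ctx,
		index.NewListPostings([]storage.SeriesRef{1}),
		index.NewListPostings([]storage.SeriesRef{2}))
	fmt.Println(p.Err()) // should print context canceled
}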

@ -385,7 +385,7 @@ func TestMultiMerge(t *testing.T) {
i2 := newListPostings(2, 4, 5, 6, 7, 8, 999, 1001) i2 := newListPostings(2, 4, 5, 6, 7, 8, 999, 1001)
i3 := newListPostings(1, 2, 5, 6, 7, 8, 1001, 1200) i3 := newListPostings(1, 2, 5, 6, 7, 8, 1001, 1200)
res, err := ExpandPostings(Merge(i1, i2, i3)) res, err := ExpandPostings(Merge(context.Background(), i1, i2, i3))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, []storage.SeriesRef{1, 2, 3, 4, 5, 6, 7, 8, 999, 1000, 1001, 1200}, res) require.Equal(t, []storage.SeriesRef{1, 2, 3, 4, 5, 6, 7, 8, 999, 1000, 1001, 1200}, res)
} }
@ -473,10 +473,12 @@ func TestMergedPostings(t *testing.T) {
t.Fatal("merge result expectancy cannot be nil") t.Fatal("merge result expectancy cannot be nil")
} }
ctx := context.Background()
expected, err := ExpandPostings(c.res) expected, err := ExpandPostings(c.res)
require.NoError(t, err) require.NoError(t, err)
m := Merge(c.in...) m := Merge(ctx, c.in...)
if c.res == EmptyPostings() { if c.res == EmptyPostings() {
require.Equal(t, EmptyPostings(), m) require.Equal(t, EmptyPostings(), m)
@ -537,10 +539,12 @@ func TestMergedPostingsSeek(t *testing.T) {
} }
for _, c := range cases { for _, c := range cases {
ctx := context.Background()
a := newListPostings(c.a...) a := newListPostings(c.a...)
b := newListPostings(c.b...) b := newListPostings(c.b...)
p := Merge(a, b) p := Merge(ctx, a, b)
require.Equal(t, c.success, p.Seek(c.seek)) require.Equal(t, c.success, p.Seek(c.seek))
@ -796,6 +800,7 @@ func TestIntersectWithMerge(t *testing.T) {
a := newListPostings(21, 22, 23, 24, 25, 30) a := newListPostings(21, 22, 23, 24, 25, 30)
b := Merge( b := Merge(
context.Background(),
newListPostings(10, 20, 30), newListPostings(10, 20, 30),
newListPostings(15, 26, 30), newListPostings(15, 26, 30),
) )

@ -15,6 +15,7 @@
package tsdb package tsdb
import ( import (
"context"
"errors" "errors"
"math" "math"
@ -156,23 +157,23 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
// PostingsForMatchers needs to be overridden so that the right IndexReader // PostingsForMatchers needs to be overridden so that the right IndexReader
// implementation gets passed down to the PostingsForMatchers call. // implementation gets passed down to the PostingsForMatchers call.
func (oh *OOOHeadIndexReader) PostingsForMatchers(concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { func (oh *OOOHeadIndexReader) PostingsForMatchers(ctx context.Context, concurrent bool, ms ...*labels.Matcher) (index.Postings, error) {
return oh.head.pfmc.PostingsForMatchers(oh, concurrent, ms...) return oh.head.pfmc.PostingsForMatchers(ctx, oh, concurrent, ms...)
} }
// LabelValues needs to be overridden from the headIndexReader implementation due // LabelValues needs to be overridden from the headIndexReader implementation due
// to the check that happens at the beginning where we make sure that the query // to the check that happens at the beginning where we make sure that the query
// interval overlaps with the head's MinOOOTime and MaxOOOTime. // interval overlaps with the head's MinOOOTime and MaxOOOTime.
func (oh *OOOHeadIndexReader) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { func (oh *OOOHeadIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
if oh.maxt < oh.head.MinOOOTime() || oh.mint > oh.head.MaxOOOTime() { if oh.maxt < oh.head.MinOOOTime() || oh.mint > oh.head.MaxOOOTime() {
return []string{}, nil return []string{}, nil
} }
if len(matchers) == 0 { if len(matchers) == 0 {
return oh.head.postings.LabelValues(name), nil return oh.head.postings.LabelValues(ctx, name), nil
} }
return labelValuesWithMatchers(oh, name, matchers...) return labelValuesWithMatchers(ctx, oh, name, matchers...)
} }
type chunkMetaAndChunkDiskMapperRef struct { type chunkMetaAndChunkDiskMapperRef struct {
@ -196,7 +197,7 @@ func lessByMinTimeAndMinRef(a, b chunks.Meta) bool {
return a.MinTime < b.MinTime return a.MinTime < b.MinTime
} }
func (oh *OOOHeadIndexReader) Postings(name string, values ...string) (index.Postings, error) { func (oh *OOOHeadIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) {
switch len(values) { switch len(values) {
case 0: case 0:
return index.EmptyPostings(), nil return index.EmptyPostings(), nil
@ -208,7 +209,7 @@ func (oh *OOOHeadIndexReader) Postings(name string, values ...string) (index.Pos
for _, value := range values { for _, value := range values {
res = append(res, oh.head.postings.Get(name, value)) // TODO(ganesh) Also call GetOOOPostings res = append(res, oh.head.postings.Get(name, value)) // TODO(ganesh) Also call GetOOOPostings
} }
return index.Merge(res...), nil return index.Merge(ctx, res...), nil
} }
} }
@ -274,7 +275,7 @@ type OOOCompactionHead struct {
// 4. Cuts a new WBL file for the OOO WBL. // 4. Cuts a new WBL file for the OOO WBL.
// All the above together have a bit of CPU and memory overhead, and can have a bit of impact // All the above together have a bit of CPU and memory overhead, and can have a bit of impact
// on the sample append latency. So call NewOOOCompactionHead only right before compaction. // on the sample append latency. So call NewOOOCompactionHead only right before compaction.
func NewOOOCompactionHead(head *Head) (*OOOCompactionHead, error) { func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead, error) {
ch := &OOOCompactionHead{ ch := &OOOCompactionHead{
chunkRange: head.chunkRange.Load(), chunkRange: head.chunkRange.Load(),
mint: math.MaxInt64, mint: math.MaxInt64,
@ -293,7 +294,7 @@ func NewOOOCompactionHead(head *Head) (*OOOCompactionHead, error) {
n, v := index.AllPostingsKey() n, v := index.AllPostingsKey()
// TODO: verify this gets only ooo samples. // TODO: verify this gets only ooo samples.
p, err := ch.oooIR.Postings(n, v) p, err := ch.oooIR.Postings(ctx, n, v)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -402,7 +403,7 @@ func (ir *OOOCompactionHeadIndexReader) Symbols() index.StringIter {
return ir.ch.oooIR.Symbols() return ir.ch.oooIR.Symbols()
} }
func (ir *OOOCompactionHeadIndexReader) Postings(name string, values ...string) (index.Postings, error) { func (ir *OOOCompactionHeadIndexReader) Postings(_ context.Context, name string, values ...string) (index.Postings, error) {
n, v := index.AllPostingsKey() n, v := index.AllPostingsKey()
if name != n || len(values) != 1 || values[0] != v { if name != n || len(values) != 1 || values[0] != v {
return nil, errors.New("only AllPostingsKey is supported") return nil, errors.New("only AllPostingsKey is supported")
@ -423,27 +424,27 @@ func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *l
return ir.ch.oooIR.series(ref, builder, chks, ir.ch.lastMmapRef) return ir.ch.oooIR.series(ref, builder, chks, ir.ch.lastMmapRef)
} }
func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
return nil, errors.New("not implemented") return nil, errors.New("not implemented")
} }
func (ir *OOOCompactionHeadIndexReader) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { func (ir *OOOCompactionHeadIndexReader) LabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
return nil, errors.New("not implemented") return nil, errors.New("not implemented")
} }
func (ir *OOOCompactionHeadIndexReader) PostingsForMatchers(concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { func (ir *OOOCompactionHeadIndexReader) PostingsForMatchers(_ context.Context, concurrent bool, ms ...*labels.Matcher) (index.Postings, error) {
return nil, errors.New("not implemented") return nil, errors.New("not implemented")
} }
func (ir *OOOCompactionHeadIndexReader) LabelNames(matchers ...*labels.Matcher) ([]string, error) { func (ir *OOOCompactionHeadIndexReader) LabelNames(context.Context, ...*labels.Matcher) ([]string, error) {
return nil, errors.New("not implemented") return nil, errors.New("not implemented")
} }
func (ir *OOOCompactionHeadIndexReader) LabelValueFor(id storage.SeriesRef, label string) (string, error) { func (ir *OOOCompactionHeadIndexReader) LabelValueFor(context.Context, storage.SeriesRef, string) (string, error) {
return "", errors.New("not implemented") return "", errors.New("not implemented")
} }
func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) { func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) {
return nil, errors.New("not implemented") return nil, errors.New("not implemented")
} }
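
The early return in LabelValues above is a plain interval-overlap guard against the head's out-of-order time range: if the query window cannot contain any OOO sample, the reader answers with an empty result without touching postings. A self-contained sketch of the same check (names illustrative):

package main

import "fmt"

// overlaps reports whether the query interval [qMint, qMaxt] intersects
// the data interval [dMint, dMaxt]. It is the positive form of the guard
// `maxt < MinOOOTime() || mint > MaxOOOTime()` used above.
func overlaps(qMint, qMaxt, dMint, dMaxt int64) bool {
	return qMaxt >= dMint && qMint <= dMaxt
}

func main() {
	fmt.Println(overlaps(0, 10, 5, 20))  // true: ranges intersect
	fmt.Println(overlaps(0, 10, 11, 20)) // false: query ends before OOO data starts
}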

@ -378,6 +378,8 @@ func TestOOOHeadChunkReader_LabelValues(t *testing.T) {
head, _ := newTestHead(t, chunkRange, wlog.CompressionNone, true) head, _ := newTestHead(t, chunkRange, wlog.CompressionNone, true)
t.Cleanup(func() { require.NoError(t, head.Close()) }) t.Cleanup(func() { require.NoError(t, head.Close()) })
ctx := context.Background()
app := head.Appender(context.Background()) app := head.Appender(context.Background())
// Add in-order samples // Add in-order samples
@ -437,24 +439,24 @@ func TestOOOHeadChunkReader_LabelValues(t *testing.T) {
// We first want to test using a head index reader that covers the biggest query interval // We first want to test using a head index reader that covers the biggest query interval
oh := NewOOOHeadIndexReader(head, tc.queryMinT, tc.queryMaxT) oh := NewOOOHeadIndexReader(head, tc.queryMinT, tc.queryMaxT)
matchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")} matchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")}
values, err := oh.LabelValues("foo", matchers...) values, err := oh.LabelValues(ctx, "foo", matchers...)
sort.Strings(values) sort.Strings(values)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, tc.expValues1, values) require.Equal(t, tc.expValues1, values)
matchers = []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "^bar.")} matchers = []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "^bar.")}
values, err = oh.LabelValues("foo", matchers...) values, err = oh.LabelValues(ctx, "foo", matchers...)
sort.Strings(values) sort.Strings(values)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, tc.expValues2, values) require.Equal(t, tc.expValues2, values)
matchers = []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.")} matchers = []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.")}
values, err = oh.LabelValues("foo", matchers...) values, err = oh.LabelValues(ctx, "foo", matchers...)
sort.Strings(values) sort.Strings(values)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, tc.expValues3, values) require.Equal(t, tc.expValues3, values)
values, err = oh.LabelValues("foo") values, err = oh.LabelValues(ctx, "foo")
sort.Strings(values) sort.Strings(values)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, tc.expValues4, values) require.Equal(t, tc.expValues4, values)

@ -2,6 +2,7 @@ package tsdb
import ( import (
"container/list" "container/list"
"context"
"strings" "strings"
"sync" "sync"
"time" "time"
@ -18,13 +19,13 @@ const (
// IndexPostingsReader is a subset of IndexReader methods, the minimum required to evaluate PostingsForMatchers // IndexPostingsReader is a subset of IndexReader methods, the minimum required to evaluate PostingsForMatchers
type IndexPostingsReader interface { type IndexPostingsReader interface {
// LabelValues returns possible label values which may not be sorted. // LabelValues returns possible label values which may not be sorted.
LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error)
// Postings returns the postings list iterator for the label pairs. // Postings returns the postings list iterator for the label pairs.
// The Postings here contain the offsets to the series inside the index. // The Postings here contain the offsets to the series inside the index.
// Found IDs are not strictly required to point to a valid Series, e.g. // Found IDs are not strictly required to point to a valid Series, e.g.
// during background garbage collections. Input values must be sorted. // during background garbage collections. Input values must be sorted.
Postings(name string, values ...string) (index.Postings, error) Postings(ctx context.Context, name string, values ...string) (index.Postings, error)
} }
// NewPostingsForMatchersCache creates a new PostingsForMatchersCache. // NewPostingsForMatchersCache creates a new PostingsForMatchersCache.
@ -63,15 +64,15 @@ type PostingsForMatchersCache struct {
postingsForMatchers func(ix IndexPostingsReader, ms ...*labels.Matcher) (index.Postings, error) postingsForMatchers func(ix IndexPostingsReader, ms ...*labels.Matcher) (index.Postings, error)
} }
func (c *PostingsForMatchersCache) PostingsForMatchers(ix IndexPostingsReader, concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { func (c *PostingsForMatchersCache) PostingsForMatchers(ctx context.Context, ix IndexPostingsReader, concurrent bool, ms ...*labels.Matcher) (index.Postings, error) {
if !concurrent && !c.force { if !concurrent && !c.force {
return c.postingsForMatchers(ix, ms...) return c.postingsForMatchers(ix, ms...)
} }
c.expire() c.expire()
return c.postingsForMatchersPromise(ix, ms)() return c.postingsForMatchersPromise(ctx, ix, ms)()
} }
func (c *PostingsForMatchersCache) postingsForMatchersPromise(ix IndexPostingsReader, ms []*labels.Matcher) func() (index.Postings, error) { func (c *PostingsForMatchersCache) postingsForMatchersPromise(_ context.Context, ix IndexPostingsReader, ms []*labels.Matcher) func() (index.Postings, error) {
var ( var (
wg sync.WaitGroup wg sync.WaitGroup
cloner *index.PostingsCloner cloner *index.PostingsCloner
@ -198,8 +199,8 @@ type indexReaderWithPostingsForMatchers struct {
pfmc *PostingsForMatchersCache pfmc *PostingsForMatchersCache
} }
func (ir indexReaderWithPostingsForMatchers) PostingsForMatchers(concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { func (ir indexReaderWithPostingsForMatchers) PostingsForMatchers(ctx context.Context, concurrent bool, ms ...*labels.Matcher) (index.Postings, error) {
return ir.pfmc.PostingsForMatchers(ir, concurrent, ms...) return ir.pfmc.PostingsForMatchers(ctx, ir, concurrent, ms...)
} }
var _ IndexReader = indexReaderWithPostingsForMatchers{} var _ IndexReader = indexReaderWithPostingsForMatchers{}
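
postingsForMatchersPromise hands concurrent callers a shared, lazily-evaluated result: the first caller computes the postings, everyone else blocks on the same promise. A minimal, generic sketch of that shape (not the actual cache, which additionally handles TTL expiry, size bounds, and postings cloning):

package main

import (
	"fmt"
	"sync"
)

// promise memoises one computation: the first Get runs fn exactly once;
// concurrent and later Gets reuse the same value and error.
type promise struct {
	once sync.Once
	val  string
	err  error
	fn   func() (string, error)
}

func (p *promise) Get() (string, error) {
	p.once.Do(func() { p.val, p.err = p.fn() })
	return p.val, p.err
}

func main() {
	calls := 0
	p := &promise{fn: func() (string, error) {
		calls++ // safe: sync.Once guarantees a single execution
		return "postings for {foo=\"bar\"}", nil
	}}

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_, _ = p.Get()
		}()
	}
	wg.Wait()
	fmt.Println(calls) // 1: all three callers shared one computation
}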

@ -1,6 +1,7 @@
package tsdb package tsdb
import ( import (
"context"
"fmt" "fmt"
"strings" "strings"
"sync" "sync"
@ -26,6 +27,8 @@ func TestPostingsForMatchersCache(t *testing.T) {
return c return c
} }
ctx := context.Background()
t.Run("happy case one call", func(t *testing.T) { t.Run("happy case one call", func(t *testing.T) {
for _, concurrent := range []bool{true, false} { for _, concurrent := range []bool{true, false} {
t.Run(fmt.Sprintf("concurrent=%t", concurrent), func(t *testing.T) { t.Run(fmt.Sprintf("concurrent=%t", concurrent), func(t *testing.T) {
@ -38,7 +41,7 @@ func TestPostingsForMatchersCache(t *testing.T) {
return index.ErrPostings(expectedPostingsErr), nil return index.ErrPostings(expectedPostingsErr), nil
}, &timeNowMock{}, false) }, &timeNowMock{}, false)
p, err := c.PostingsForMatchers(indexForPostingsMock{}, concurrent, expectedMatchers...) p, err := c.PostingsForMatchers(ctx, indexForPostingsMock{}, concurrent, expectedMatchers...)
require.NoError(t, err) require.NoError(t, err)
require.NotNil(t, p) require.NotNil(t, p)
require.Equal(t, p.Err(), expectedPostingsErr, "Expected ErrPostings with err %q, got %T with err %q", expectedPostingsErr, p, p.Err()) require.Equal(t, p.Err(), expectedPostingsErr, "Expected ErrPostings with err %q, got %T with err %q", expectedPostingsErr, p, p.Err())
@ -54,7 +57,7 @@ func TestPostingsForMatchersCache(t *testing.T) {
return nil, expectedErr return nil, expectedErr
}, &timeNowMock{}, false) }, &timeNowMock{}, false)
_, err := c.PostingsForMatchers(indexForPostingsMock{}, true, expectedMatchers...) _, err := c.PostingsForMatchers(ctx, indexForPostingsMock{}, true, expectedMatchers...)
require.Equal(t, expectedErr, err) require.Equal(t, expectedErr, err)
}) })
@ -114,7 +117,7 @@ func TestPostingsForMatchersCache(t *testing.T) {
// perform all calls // perform all calls
for i := 0; i < len(calls); i++ { for i := 0; i < len(calls); i++ {
go func(i int) { go func(i int) {
_, err := c.PostingsForMatchers(indexForPostingsMock{}, concurrent, calls[i]...) _, err := c.PostingsForMatchers(ctx, indexForPostingsMock{}, concurrent, calls[i]...)
results[i] = err.Error() results[i] = err.Error()
resultsWg.Done() resultsWg.Done()
}(i) }(i)
@ -151,12 +154,12 @@ func TestPostingsForMatchersCache(t *testing.T) {
}, &timeNowMock{}, false) }, &timeNowMock{}, false)
// first call, fills the cache // first call, fills the cache
p, err := c.PostingsForMatchers(indexForPostingsMock{}, false, expectedMatchers...) p, err := c.PostingsForMatchers(ctx, indexForPostingsMock{}, false, expectedMatchers...)
require.NoError(t, err) require.NoError(t, err)
require.EqualError(t, p.Err(), "result from call 1") require.EqualError(t, p.Err(), "result from call 1")
// second call within the ttl (we didn't advance the time), should call again because concurrent==false // second call within the ttl (we didn't advance the time), should call again because concurrent==false
p, err = c.PostingsForMatchers(indexForPostingsMock{}, false, expectedMatchers...) p, err = c.PostingsForMatchers(ctx, indexForPostingsMock{}, false, expectedMatchers...)
require.NoError(t, err) require.NoError(t, err)
require.EqualError(t, p.Err(), "result from call 2") require.EqualError(t, p.Err(), "result from call 2")
}) })
@ -171,12 +174,12 @@ func TestPostingsForMatchersCache(t *testing.T) {
}, &timeNowMock{}, false) }, &timeNowMock{}, false)
// first call, fills the cache // first call, fills the cache
p, err := c.PostingsForMatchers(indexForPostingsMock{}, true, expectedMatchers...) p, err := c.PostingsForMatchers(ctx, indexForPostingsMock{}, true, expectedMatchers...)
require.NoError(t, err) require.NoError(t, err)
require.EqualError(t, p.Err(), "result from call 1") require.EqualError(t, p.Err(), "result from call 1")
// second call within the ttl (we didn't advance the time), should call again because the cache is disabled // second call within the ttl (we didn't advance the time), should call again because the cache is disabled
p, err = c.PostingsForMatchers(indexForPostingsMock{}, true, expectedMatchers...) p, err = c.PostingsForMatchers(ctx, indexForPostingsMock{}, true, expectedMatchers...)
require.NoError(t, err) require.NoError(t, err)
require.EqualError(t, p.Err(), "result from call 2") require.EqualError(t, p.Err(), "result from call 2")
}) })
@ -194,21 +197,21 @@ func TestPostingsForMatchersCache(t *testing.T) {
}, timeNow, false) }, timeNow, false)
// first call, fills the cache // first call, fills the cache
p, err := c.PostingsForMatchers(indexForPostingsMock{}, true, expectedMatchers...) p, err := c.PostingsForMatchers(ctx, indexForPostingsMock{}, true, expectedMatchers...)
require.NoError(t, err) require.NoError(t, err)
require.EqualError(t, p.Err(), "result from call 1") require.EqualError(t, p.Err(), "result from call 1")
timeNow.advance(defaultPostingsForMatchersCacheTTL / 2) timeNow.advance(defaultPostingsForMatchersCacheTTL / 2)
// second call within the ttl, should use the cache // second call within the ttl, should use the cache
p, err = c.PostingsForMatchers(indexForPostingsMock{}, true, expectedMatchers...) p, err = c.PostingsForMatchers(ctx, indexForPostingsMock{}, true, expectedMatchers...)
require.NoError(t, err) require.NoError(t, err)
require.EqualError(t, p.Err(), "result from call 1") require.EqualError(t, p.Err(), "result from call 1")
timeNow.advance(defaultPostingsForMatchersCacheTTL / 2) timeNow.advance(defaultPostingsForMatchersCacheTTL / 2)
// third call is after ttl (exactly), should call again // third call is after ttl (exactly), should call again
p, err = c.PostingsForMatchers(indexForPostingsMock{}, true, expectedMatchers...) p, err = c.PostingsForMatchers(ctx, indexForPostingsMock{}, true, expectedMatchers...)
require.NoError(t, err) require.NoError(t, err)
require.EqualError(t, p.Err(), "result from call 2") require.EqualError(t, p.Err(), "result from call 2")
}) })
@ -230,12 +233,12 @@ func TestPostingsForMatchersCache(t *testing.T) {
// each one of the first testCacheSize calls is cached properly // each one of the first testCacheSize calls is cached properly
for _, matchers := range calls { for _, matchers := range calls {
// first call // first call
p, err := c.PostingsForMatchers(indexForPostingsMock{}, true, matchers...) p, err := c.PostingsForMatchers(ctx, indexForPostingsMock{}, true, matchers...)
require.NoError(t, err) require.NoError(t, err)
require.EqualError(t, p.Err(), "result from call 1") require.EqualError(t, p.Err(), "result from call 1")
// cached value // cached value
p, err = c.PostingsForMatchers(indexForPostingsMock{}, true, matchers...) p, err = c.PostingsForMatchers(ctx, indexForPostingsMock{}, true, matchers...)
require.NoError(t, err) require.NoError(t, err)
require.EqualError(t, p.Err(), "result from call 1") require.EqualError(t, p.Err(), "result from call 1")
} }
@ -243,17 +246,17 @@ func TestPostingsForMatchersCache(t *testing.T) {
// one extra call is made, which is cached properly, but evicts the first cached value // one extra call is made, which is cached properly, but evicts the first cached value
someExtraMatchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")} someExtraMatchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")}
// first call // first call
p, err := c.PostingsForMatchers(indexForPostingsMock{}, true, someExtraMatchers...) p, err := c.PostingsForMatchers(ctx, indexForPostingsMock{}, true, someExtraMatchers...)
require.NoError(t, err) require.NoError(t, err)
require.EqualError(t, p.Err(), "result from call 1") require.EqualError(t, p.Err(), "result from call 1")
// cached value // cached value
p, err = c.PostingsForMatchers(indexForPostingsMock{}, true, someExtraMatchers...) p, err = c.PostingsForMatchers(ctx, indexForPostingsMock{}, true, someExtraMatchers...)
require.NoError(t, err) require.NoError(t, err)
require.EqualError(t, p.Err(), "result from call 1") require.EqualError(t, p.Err(), "result from call 1")
// make first call again, it's calculated again // make first call again, it's calculated again
p, err = c.PostingsForMatchers(indexForPostingsMock{}, true, calls[0]...) p, err = c.PostingsForMatchers(ctx, indexForPostingsMock{}, true, calls[0]...)
require.NoError(t, err) require.NoError(t, err)
require.EqualError(t, p.Err(), "result from call 2") require.EqualError(t, p.Err(), "result from call 2")
}) })
@ -261,11 +264,11 @@ func TestPostingsForMatchersCache(t *testing.T) {
type indexForPostingsMock struct{} type indexForPostingsMock struct{}
func (idx indexForPostingsMock) LabelValues(string, ...*labels.Matcher) ([]string, error) { func (idx indexForPostingsMock) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, error) {
panic("implement me") panic("implement me")
} }
func (idx indexForPostingsMock) Postings(string, ...string) (index.Postings, error) { func (idx indexForPostingsMock) Postings(context.Context, string, ...string) (index.Postings, error) {
panic("implement me") panic("implement me")
} }

@ -14,6 +14,7 @@
package tsdb package tsdb
import ( import (
"context"
"fmt" "fmt"
"math" "math"
@ -29,6 +30,7 @@ import (
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/prometheus/prometheus/tsdb/index" "github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/tombstones" "github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/util/annotations"
) )
type blockBaseQuerier struct { type blockBaseQuerier struct {
@ -72,13 +74,13 @@ func newBlockBaseQuerier(b BlockReader, mint, maxt int64) (*blockBaseQuerier, er
}, nil }, nil
} }
func (q *blockBaseQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
res, err := q.index.SortedLabelValues(name, matchers...) res, err := q.index.SortedLabelValues(ctx, name, matchers...)
return res, nil, err return res, nil, err
} }
func (q *blockBaseQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { func (q *blockBaseQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
res, err := q.index.LabelNames(matchers...) res, err := q.index.LabelNames(ctx, matchers...)
return res, nil, err return res, nil, err
} }
@ -109,12 +111,12 @@ func NewBlockQuerier(b BlockReader, mint, maxt int64) (storage.Querier, error) {
return &blockQuerier{blockBaseQuerier: q}, nil return &blockQuerier{blockBaseQuerier: q}, nil
} }
func (q *blockQuerier) Select(sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.SeriesSet { func (q *blockQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.SeriesSet {
mint := q.mint mint := q.mint
maxt := q.maxt maxt := q.maxt
disableTrimming := false disableTrimming := false
sharded := hints != nil && hints.ShardCount > 0 sharded := hints != nil && hints.ShardCount > 0
p, err := q.index.PostingsForMatchers(sharded, ms...) p, err := q.index.PostingsForMatchers(ctx, sharded, ms...)
if err != nil { if err != nil {
return storage.ErrSeriesSet(err) return storage.ErrSeriesSet(err)
} }
@ -152,7 +154,7 @@ func NewBlockChunkQuerier(b BlockReader, mint, maxt int64) (storage.ChunkQuerier
return &blockChunkQuerier{blockBaseQuerier: q}, nil return &blockChunkQuerier{blockBaseQuerier: q}, nil
} }
func (q *blockChunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.ChunkSeriesSet { func (q *blockChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.ChunkSeriesSet {
mint := q.mint mint := q.mint
maxt := q.maxt maxt := q.maxt
disableTrimming := false disableTrimming := false
@ -162,7 +164,7 @@ func (q *blockChunkQuerier) Select(sortSeries bool, hints *storage.SelectHints,
disableTrimming = hints.DisableTrimming disableTrimming = hints.DisableTrimming
} }
sharded := hints != nil && hints.ShardCount > 0 sharded := hints != nil && hints.ShardCount > 0
p, err := q.index.PostingsForMatchers(sharded, ms...) p, err := q.index.PostingsForMatchers(ctx, sharded, ms...)
if err != nil { if err != nil {
return storage.ErrChunkSeriesSet(err) return storage.ErrChunkSeriesSet(err)
} }
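
With this change the context rides on Select itself rather than on the querier's constructor, so one querier can serve several requests, each with its own cancellation. A hedged usage sketch of the new call shape (countSeries is illustrative, not a Prometheus helper):

package main

import (
	"context"
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

// countSeries drains a Select under the caller's context. Any
// storage.Querier works here, e.g. one obtained from
// (*tsdb.DB).Querier(mint, maxt) after this change.
func countSeries(ctx context.Context, q storage.Querier, ms ...*labels.Matcher) (int, error) {
	ss := q.Select(ctx, false, nil, ms...)
	n := 0
	for ss.Next() {
		n++
	}
	return n, ss.Err()
}

func main() {
	fmt.Println("countSeries demonstrates Select(ctx, sortSeries, hints, matchers...)")
}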
@ -207,7 +209,7 @@ func PostingsForMatchers(ix IndexPostingsReader, ms ...*labels.Matcher) (index.P
// We prefer to get AllPostings so that the base of subtraction (i.e. allPostings) // We prefer to get AllPostings so that the base of subtraction (i.e. allPostings)
// doesn't include series that may be added to the index reader during this function call. // doesn't include series that may be added to the index reader during this function call.
k, v := index.AllPostingsKey() k, v := index.AllPostingsKey()
allPostings, err := ix.Postings(k, v) allPostings, err := ix.Postings(context.TODO(), k, v)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -227,7 +229,7 @@ func PostingsForMatchers(ix IndexPostingsReader, ms ...*labels.Matcher) (index.P
switch { switch {
case m.Name == "" && m.Value == "": // Special-case for AllPostings, used in tests at least. case m.Name == "" && m.Value == "": // Special-case for AllPostings, used in tests at least.
k, v := index.AllPostingsKey() k, v := index.AllPostingsKey()
allPostings, err := ix.Postings(k, v) allPostings, err := ix.Postings(context.TODO(), k, v)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -304,18 +306,18 @@ func postingsForMatcher(ix IndexPostingsReader, m *labels.Matcher) (index.Postin
// Fast-path for equal matching. // Fast-path for equal matching.
if m.Type == labels.MatchEqual { if m.Type == labels.MatchEqual {
return ix.Postings(m.Name, m.Value) return ix.Postings(context.TODO(), m.Name, m.Value)
} }
// Fast-path for set matching. // Fast-path for set matching.
if m.Type == labels.MatchRegexp { if m.Type == labels.MatchRegexp {
setMatches := m.SetMatches() setMatches := m.SetMatches()
if len(setMatches) > 0 { if len(setMatches) > 0 {
return ix.Postings(m.Name, setMatches...) return ix.Postings(context.TODO(), m.Name, setMatches...)
} }
} }
vals, err := ix.LabelValues(m.Name) vals, err := ix.LabelValues(context.TODO(), m.Name)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -331,7 +333,7 @@ func postingsForMatcher(ix IndexPostingsReader, m *labels.Matcher) (index.Postin
return index.EmptyPostings(), nil return index.EmptyPostings(), nil
} }
return ix.Postings(m.Name, res...) return ix.Postings(context.TODO(), m.Name, res...)
} }
// inversePostingsForMatcher returns the postings for the series with the label name set but not matching the matcher. // inversePostingsForMatcher returns the postings for the series with the label name set but not matching the matcher.
@ -342,17 +344,17 @@ func inversePostingsForMatcher(ix IndexPostingsReader, m *labels.Matcher) (index
if m.Type == labels.MatchNotRegexp { if m.Type == labels.MatchNotRegexp {
setMatches := m.SetMatches() setMatches := m.SetMatches()
if len(setMatches) > 0 { if len(setMatches) > 0 {
return ix.Postings(m.Name, setMatches...) return ix.Postings(context.TODO(), m.Name, setMatches...)
} }
} }
// Fast-path for MatchNotEqual matching. // Fast-path for MatchNotEqual matching.
// Inverse of a MatchNotEqual is MatchEqual (double negation). // Inverse of a MatchNotEqual is MatchEqual (double negation).
if m.Type == labels.MatchNotEqual { if m.Type == labels.MatchNotEqual {
return ix.Postings(m.Name, m.Value) return ix.Postings(context.TODO(), m.Name, m.Value)
} }
vals, err := ix.LabelValues(m.Name) vals, err := ix.LabelValues(context.TODO(), m.Name)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -369,18 +371,18 @@ func inversePostingsForMatcher(ix IndexPostingsReader, m *labels.Matcher) (index
} }
} }
return ix.Postings(m.Name, res...) return ix.Postings(context.TODO(), m.Name, res...)
} }
const maxExpandedPostingsFactor = 100 // Division factor for maximum number of matched series. const maxExpandedPostingsFactor = 100 // Division factor for maximum number of matched series.
func labelValuesWithMatchers(r IndexReader, name string, matchers ...*labels.Matcher) ([]string, error) { func labelValuesWithMatchers(ctx context.Context, r IndexReader, name string, matchers ...*labels.Matcher) ([]string, error) {
p, err := PostingsForMatchers(r, matchers...) p, err := PostingsForMatchers(r, matchers...)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "fetching postings for matchers") return nil, errors.Wrap(err, "fetching postings for matchers")
} }
allValues, err := r.LabelValues(name) allValues, err := r.LabelValues(ctx, name)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "fetching values of label %s", name) return nil, errors.Wrapf(err, "fetching values of label %s", name)
} }
@ -434,7 +436,7 @@ func labelValuesWithMatchers(r IndexReader, name string, matchers ...*labels.Mat
valuesPostings := make([]index.Postings, len(allValues)) valuesPostings := make([]index.Postings, len(allValues))
for i, value := range allValues { for i, value := range allValues {
valuesPostings[i], err = r.Postings(name, value) valuesPostings[i], err = r.Postings(ctx, name, value)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "fetching postings for %s=%q", name, value) return nil, errors.Wrapf(err, "fetching postings for %s=%q", name, value)
} }
@ -533,8 +535,8 @@ func (p *prependPostings) Err() error {
return p.rest.Err() return p.rest.Err()
} }
func labelNamesWithMatchers(r IndexReader, matchers ...*labels.Matcher) ([]string, error) { func labelNamesWithMatchers(ctx context.Context, r IndexReader, matchers ...*labels.Matcher) ([]string, error) {
p, err := r.PostingsForMatchers(false, matchers...) p, err := r.PostingsForMatchers(ctx, false, matchers...)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -547,7 +549,7 @@ func labelNamesWithMatchers(r IndexReader, matchers ...*labels.Matcher) ([]strin
return nil, errors.Wrapf(p.Err(), "postings for label names with matchers") return nil, errors.Wrapf(p.Err(), "postings for label names with matchers")
} }
return r.LabelNamesFor(postings...) return r.LabelNamesFor(ctx, postings...)
} }
// seriesData, used inside other iterators, are updated when we move from one series to another. // seriesData, used inside other iterators, are updated when we move from one series to another.
@ -667,7 +669,7 @@ func (b *blockBaseSeriesSet) Err() error {
return b.p.Err() return b.p.Err()
} }
func (b *blockBaseSeriesSet) Warnings() storage.Warnings { return nil } func (b *blockBaseSeriesSet) Warnings() annotations.Annotations { return nil }
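
Warnings are now typed as annotations.Annotations (from util/annotations, imported at the top of this file's diff) instead of storage.Warnings, and nil remains a valid "no annotations" value. A minimal stub satisfying the updated storage.SeriesSet shape:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/util/annotations"
)

// emptySeriesSet is the smallest possible SeriesSet under the new
// signature: Warnings returns annotations.Annotations rather than the
// old storage.Warnings slice.
type emptySeriesSet struct{}

func (emptySeriesSet) Next() bool                        { return false }
func (emptySeriesSet) At() storage.Series                { return nil }
func (emptySeriesSet) Err() error                        { return nil }
func (emptySeriesSet) Warnings() annotations.Annotations { return nil }

func main() {
	var ss storage.SeriesSet = emptySeriesSet{}
	for ss.Next() {
		// never reached: the set is empty
	}
	fmt.Println(ss.Err(), ss.Warnings()) // <nil> and an empty/nil annotation set
}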
// populateWithDelGenericSeriesIterator allows iterating over given chunk // populateWithDelGenericSeriesIterator allows iterating over given chunk
// metas. In each iteration it ensures that chunks are trimmed based on given // metas. In each iteration it ensures that chunks are trimmed based on given

@ -188,6 +188,8 @@ func benchmarkLabelValuesWithMatchers(b *testing.B, ir IndexReader) {
nonPrimesTimes := labels.MustNewMatcher(labels.MatchEqual, "i_times_n", "20") // 1*20, 2*10, 4*5, 5*4 nonPrimesTimes := labels.MustNewMatcher(labels.MatchEqual, "i_times_n", "20") // 1*20, 2*10, 4*5, 5*4
times12 := labels.MustNewMatcher(labels.MatchRegexp, "i_times_n", "12.*") times12 := labels.MustNewMatcher(labels.MatchRegexp, "i_times_n", "12.*")
ctx := context.Background()
cases := []struct { cases := []struct {
name string name string
labelName string labelName string
@ -213,7 +215,7 @@ func benchmarkLabelValuesWithMatchers(b *testing.B, ir IndexReader) {
for _, c := range cases { for _, c := range cases {
b.Run(c.name, func(b *testing.B) { b.Run(c.name, func(b *testing.B) {
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
_, err := labelValuesWithMatchers(ir, c.labelName, c.matchers...) _, err := labelValuesWithMatchers(ctx, ir, c.labelName, c.matchers...)
require.NoError(b, err) require.NoError(b, err)
} }
}) })
@ -278,7 +280,7 @@ func BenchmarkQuerierSelect(b *testing.B) {
} }
} }
ss := q.Select(sorted, hints, matcher) ss := q.Select(context.Background(), sorted, hints, matcher)
for ss.Next() { // nolint:revive for ss.Next() { // nolint:revive
} }
require.NoError(b, ss.Err()) require.NoError(b, ss.Err())

@ -38,20 +38,21 @@ import (
"github.com/prometheus/prometheus/tsdb/index" "github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/tombstones" "github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/util/annotations"
) )
// TODO(bwplotka): Replace those mocks with remote.concreteSeriesSet. // TODO(bwplotka): Replace those mocks with remote.concreteSeriesSet.
type mockSeriesSet struct { type mockSeriesSet struct {
next func() bool next func() bool
series func() storage.Series series func() storage.Series
ws func() storage.Warnings ws func() annotations.Annotations
err func() error err func() error
} }
func (m *mockSeriesSet) Next() bool { return m.next() } func (m *mockSeriesSet) Next() bool { return m.next() }
func (m *mockSeriesSet) At() storage.Series { return m.series() } func (m *mockSeriesSet) At() storage.Series { return m.series() }
func (m *mockSeriesSet) Err() error { return m.err() } func (m *mockSeriesSet) Err() error { return m.err() }
func (m *mockSeriesSet) Warnings() storage.Warnings { return m.ws() } func (m *mockSeriesSet) Warnings() annotations.Annotations { return m.ws() }
func newMockSeriesSet(list []storage.Series) *mockSeriesSet { func newMockSeriesSet(list []storage.Series) *mockSeriesSet {
i := -1 i := -1
@ -64,21 +65,21 @@ func newMockSeriesSet(list []storage.Series) *mockSeriesSet {
return list[i] return list[i]
}, },
err: func() error { return nil }, err: func() error { return nil },
ws: func() storage.Warnings { return nil }, ws: func() annotations.Annotations { return nil },
} }
} }
type mockChunkSeriesSet struct { type mockChunkSeriesSet struct {
next func() bool next func() bool
series func() storage.ChunkSeries series func() storage.ChunkSeries
ws func() storage.Warnings ws func() annotations.Annotations
err func() error err func() error
} }
func (m *mockChunkSeriesSet) Next() bool { return m.next() } func (m *mockChunkSeriesSet) Next() bool { return m.next() }
func (m *mockChunkSeriesSet) At() storage.ChunkSeries { return m.series() } func (m *mockChunkSeriesSet) At() storage.ChunkSeries { return m.series() }
func (m *mockChunkSeriesSet) Err() error { return m.err() } func (m *mockChunkSeriesSet) Err() error { return m.err() }
func (m *mockChunkSeriesSet) Warnings() storage.Warnings { return m.ws() } func (m *mockChunkSeriesSet) Warnings() annotations.Annotations { return m.ws() }
func newMockChunkSeriesSet(list []storage.ChunkSeries) *mockChunkSeriesSet { func newMockChunkSeriesSet(list []storage.ChunkSeries) *mockChunkSeriesSet {
i := -1 i := -1
@ -91,7 +92,7 @@ func newMockChunkSeriesSet(list []storage.ChunkSeries) *mockChunkSeriesSet {
return list[i] return list[i]
}, },
err: func() error { return nil }, err: func() error { return nil },
ws: func() storage.Warnings { return nil }, ws: func() annotations.Annotations { return nil },
} }
} }
@ -182,7 +183,7 @@ func testBlockQuerier(t *testing.T, c blockQuerierTestCase, ir IndexReader, cr C
}, },
} }
res := q.Select(false, c.hints, c.ms...) res := q.Select(context.Background(), false, c.hints, c.ms...)
defer func() { require.NoError(t, q.Close()) }() defer func() { require.NoError(t, q.Close()) }()
for { for {
@ -217,7 +218,7 @@ func testBlockQuerier(t *testing.T, c blockQuerierTestCase, ir IndexReader, cr C
maxt: c.maxt, maxt: c.maxt,
}, },
} }
res := q.Select(false, c.hints, c.ms...) res := q.Select(context.Background(), false, c.hints, c.ms...)
defer func() { require.NoError(t, q.Close()) }() defer func() { require.NoError(t, q.Close()) }()
for { for {
@ -544,6 +545,7 @@ func TestBlockQuerier_AgainstHeadWithOpenChunks(t *testing.T) {
} }
func TestBlockQuerier_TrimmingDoesNotModifyOriginalTombstoneIntervals(t *testing.T) { func TestBlockQuerier_TrimmingDoesNotModifyOriginalTombstoneIntervals(t *testing.T) {
ctx := context.Background()
c := blockQuerierTestCase{ c := blockQuerierTestCase{
mint: 2, mint: 2,
maxt: 6, maxt: 6,
@ -567,7 +569,7 @@ func TestBlockQuerier_TrimmingDoesNotModifyOriginalTombstoneIntervals(t *testing
} }
ir, cr, _, _ := createIdxChkReaders(t, testData) ir, cr, _, _ := createIdxChkReaders(t, testData)
stones := tombstones.NewMemTombstones() stones := tombstones.NewMemTombstones()
p, err := ir.Postings("a", "a") p, err := ir.Postings(ctx, "a", "a")
require.NoError(t, err) require.NoError(t, err)
refs, err := index.ExpandPostings(p) refs, err := index.ExpandPostings(p)
require.NoError(t, err) require.NoError(t, err)
@ -1492,13 +1494,13 @@ func (m mockIndex) Close() error {
return nil return nil
} }
func (m mockIndex) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { func (m mockIndex) SortedLabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
values, _ := m.LabelValues(name, matchers...) values, _ := m.LabelValues(ctx, name, matchers...)
sort.Strings(values) sort.Strings(values)
return values, nil return values, nil
} }
func (m mockIndex) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { func (m mockIndex) LabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
var values []string var values []string
if len(matchers) == 0 { if len(matchers) == 0 {
@ -1522,11 +1524,11 @@ func (m mockIndex) LabelValues(name string, matchers ...*labels.Matcher) ([]stri
return values, nil return values, nil
} }
func (m mockIndex) LabelValueFor(id storage.SeriesRef, label string) (string, error) { func (m mockIndex) LabelValueFor(_ context.Context, id storage.SeriesRef, label string) (string, error) {
return m.series[id].l.Get(label), nil return m.series[id].l.Get(label), nil
} }
func (m mockIndex) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) { func (m mockIndex) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) {
namesMap := make(map[string]bool) namesMap := make(map[string]bool)
for _, id := range ids { for _, id := range ids {
m.series[id].l.Range(func(lbl labels.Label) { m.series[id].l.Range(func(lbl labels.Label) {
@ -1540,13 +1542,13 @@ func (m mockIndex) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) {
return names, nil return names, nil
} }
func (m mockIndex) Postings(name string, values ...string) (index.Postings, error) { func (m mockIndex) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) {
res := make([]index.Postings, 0, len(values)) res := make([]index.Postings, 0, len(values))
for _, value := range values { for _, value := range values {
l := labels.Label{Name: name, Value: value} l := labels.Label{Name: name, Value: value}
res = append(res, index.NewListPostings(m.postings[l])) res = append(res, index.NewListPostings(m.postings[l]))
} }
return index.Merge(res...), nil return index.Merge(ctx, res...), nil
} }
func (m mockIndex) SortedPostings(p index.Postings) index.Postings { func (m mockIndex) SortedPostings(p index.Postings) index.Postings {
@ -1561,7 +1563,7 @@ func (m mockIndex) SortedPostings(p index.Postings) index.Postings {
return index.NewListPostings(ep) return index.NewListPostings(ep)
} }
func (m mockIndex) PostingsForMatchers(concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { func (m mockIndex) PostingsForMatchers(_ context.Context, concurrent bool, ms ...*labels.Matcher) (index.Postings, error) {
var ps []storage.SeriesRef var ps []storage.SeriesRef
for p, s := range m.series { for p, s := range m.series {
if matches(ms, s.l) { if matches(ms, s.l) {
@ -1614,7 +1616,7 @@ func (m mockIndex) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder,
return nil return nil
} }
func (m mockIndex) LabelNames(matchers ...*labels.Matcher) ([]string, error) { func (m mockIndex) LabelNames(_ context.Context, matchers ...*labels.Matcher) ([]string, error) {
names := map[string]struct{}{} names := map[string]struct{}{}
if len(matchers) == 0 { if len(matchers) == 0 {
for l := range m.postings { for l := range m.postings {
@ -1771,7 +1773,7 @@ func BenchmarkQuerySeek(b *testing.B) {
b.ReportAllocs() b.ReportAllocs()
var it chunkenc.Iterator var it chunkenc.Iterator
ss := sq.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*")) ss := sq.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*"))
for ss.Next() { for ss.Next() {
it = ss.At().Iterator(it) it = ss.At().Iterator(it)
for t := mint; t <= maxt; t++ { for t := mint; t <= maxt; t++ {
@ -1904,7 +1906,7 @@ func BenchmarkSetMatcher(b *testing.B) {
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
ss := sq.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "test", c.pattern)) ss := sq.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "test", c.pattern))
for ss.Next() { for ss.Next() {
} }
require.NoError(b, ss.Err()) require.NoError(b, ss.Err())
@ -2253,10 +2255,10 @@ func TestQuerierIndexQueriesRace(t *testing.T) {
t.Cleanup(cancel) t.Cleanup(cancel)
for i := 0; i < testRepeats; i++ { for i := 0; i < testRepeats; i++ {
q, err := db.Querier(ctx, math.MinInt64, math.MaxInt64) q, err := db.Querier(math.MinInt64, math.MaxInt64)
require.NoError(t, err) require.NoError(t, err)
values, _, err := q.LabelValues("seq", c.matchers...) values, _, err := q.LabelValues(ctx, "seq", c.matchers...)
require.NoError(t, err) require.NoError(t, err)
require.Emptyf(t, values, `label values for label "seq" should be empty`) require.Emptyf(t, values, `label values for label "seq" should be empty`)
} }
@ -2294,7 +2296,7 @@ func TestClose(t *testing.T) {
require.NoError(t, db.Close()) require.NoError(t, db.Close())
}() }()
q, err := db.Querier(context.TODO(), 0, 20) q, err := db.Querier(0, 20)
require.NoError(t, err) require.NoError(t, err)
require.NoError(t, q.Close()) require.NoError(t, q.Close())
require.Error(t, q.Close()) require.Error(t, q.Close())
@ -2427,7 +2429,7 @@ func benchQuery(b *testing.B, expExpansions int, q storage.Querier, selectors la
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
ss := q.Select(false, nil, selectors...) ss := q.Select(context.Background(), false, nil, selectors...)
var actualExpansions int var actualExpansions int
var it chunkenc.Iterator var it chunkenc.Iterator
for ss.Next() { for ss.Next() {
@ -2454,24 +2456,24 @@ func (m mockMatcherIndex) Symbols() index.StringIter { return nil }
func (m mockMatcherIndex) Close() error { return nil } func (m mockMatcherIndex) Close() error { return nil }
// SortedLabelValues will return error if it is called. // SortedLabelValues will return error if it is called.
func (m mockMatcherIndex) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { func (m mockMatcherIndex) SortedLabelValues(context.Context, string, ...*labels.Matcher) ([]string, error) {
return []string{}, errors.New("sorted label values called") return []string{}, errors.New("sorted label values called")
} }
// LabelValues will return error if it is called. // LabelValues will return error if it is called.
func (m mockMatcherIndex) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { func (m mockMatcherIndex) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, error) {
return []string{}, errors.New("label values called") return []string{}, errors.New("label values called")
} }
func (m mockMatcherIndex) LabelValueFor(id storage.SeriesRef, label string) (string, error) { func (m mockMatcherIndex) LabelValueFor(context.Context, storage.SeriesRef, string) (string, error) {
return "", errors.New("label value for called") return "", errors.New("label value for called")
} }
func (m mockMatcherIndex) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) { func (m mockMatcherIndex) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) {
return nil, errors.New("label names for for called") return nil, errors.New("label names for for called")
} }
func (m mockMatcherIndex) Postings(name string, values ...string) (index.Postings, error) { func (m mockMatcherIndex) Postings(context.Context, string, ...string) (index.Postings, error) {
return index.EmptyPostings(), nil return index.EmptyPostings(), nil
} }
@ -2491,7 +2493,7 @@ func (m mockMatcherIndex) Series(ref storage.SeriesRef, builder *labels.ScratchB
return nil return nil
} }
func (m mockMatcherIndex) LabelNames(...*labels.Matcher) ([]string, error) { func (m mockMatcherIndex) LabelNames(context.Context, ...*labels.Matcher) ([]string, error) {
return []string{}, nil return []string{}, nil
} }
@ -2655,7 +2657,7 @@ func BenchmarkHeadChunkQuerier(b *testing.B) {
} }
require.NoError(b, app.Commit()) require.NoError(b, app.Commit())
querier, err := db.ChunkQuerier(context.Background(), math.MinInt64, math.MaxInt64) querier, err := db.ChunkQuerier(math.MinInt64, math.MaxInt64)
require.NoError(b, err) require.NoError(b, err)
defer func(q storage.ChunkQuerier) { defer func(q storage.ChunkQuerier) {
require.NoError(b, q.Close()) require.NoError(b, q.Close())
@ -2663,7 +2665,7 @@ func BenchmarkHeadChunkQuerier(b *testing.B) {
b.ReportAllocs() b.ReportAllocs()
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
ss := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*")) ss := querier.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
total := 0 total := 0
for ss.Next() { for ss.Next() {
cs := ss.At() cs := ss.At()
@ -2700,7 +2702,7 @@ func BenchmarkHeadQuerier(b *testing.B) {
} }
require.NoError(b, app.Commit()) require.NoError(b, app.Commit())
querier, err := db.Querier(context.Background(), math.MinInt64, math.MaxInt64) querier, err := db.Querier(math.MinInt64, math.MaxInt64)
require.NoError(b, err) require.NoError(b, err)
defer func(q storage.Querier) { defer func(q storage.Querier) {
require.NoError(b, q.Close()) require.NoError(b, q.Close())
@ -2708,7 +2710,7 @@ func BenchmarkHeadQuerier(b *testing.B) {
b.ReportAllocs() b.ReportAllocs()
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
ss := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*")) ss := querier.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
total := int64(0) total := int64(0)
for ss.Next() { for ss.Next() {
cs := ss.At() cs := ss.At()
@ -2726,6 +2728,7 @@ func BenchmarkHeadQuerier(b *testing.B) {
// This is a regression test for the case where gauge histograms were not handled by // This is a regression test for the case where gauge histograms were not handled by
// populateWithDelChunkSeriesIterator correctly. // populateWithDelChunkSeriesIterator correctly.
func TestQueryWithDeletedHistograms(t *testing.T) { func TestQueryWithDeletedHistograms(t *testing.T) {
ctx := context.Background()
testcases := map[string]func(int) (*histogram.Histogram, *histogram.FloatHistogram){ testcases := map[string]func(int) (*histogram.Histogram, *histogram.FloatHistogram){
"intCounter": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) { "intCounter": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) {
return tsdbutil.GenerateTestHistogram(i), nil return tsdbutil.GenerateTestHistogram(i), nil
@ -2770,13 +2773,13 @@ func TestQueryWithDeletedHistograms(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// Delete the last 20. // Delete the last 20.
err = db.Delete(80, 100, matcher) err = db.Delete(ctx, 80, 100, matcher)
require.NoError(t, err) require.NoError(t, err)
chunkQuerier, err := db.ChunkQuerier(context.Background(), 0, 100) chunkQuerier, err := db.ChunkQuerier(0, 100)
require.NoError(t, err) require.NoError(t, err)
css := chunkQuerier.Select(false, nil, matcher) css := chunkQuerier.Select(context.Background(), false, nil, matcher)
seriesCount := 0 seriesCount := 0
for css.Next() { for css.Next() {
@ -2866,7 +2869,9 @@ func TestLabelsValuesWithMatchersOptimization(t *testing.T) {
require.NoError(t, h.Close()) require.NoError(t, h.Close())
}() }()
app := h.Appender(context.Background()) ctx := context.Background()
app := h.Appender(ctx)
addSeries := func(l labels.Labels) { addSeries := func(l labels.Labels) {
app.Append(0, l, 0, 0) app.Append(0, l, 0, 0)
} }
@ -2905,7 +2910,7 @@ func TestLabelsValuesWithMatchersOptimization(t *testing.T) {
for _, c := range cases { for _, c := range cases {
t.Run(c.name, func(t *testing.T) { t.Run(c.name, func(t *testing.T) {
values, err := labelValuesWithMatchers(ir, c.labelName, c.matchers...) values, err := labelValuesWithMatchers(ctx, ir, c.labelName, c.matchers...)
require.NoError(t, err) require.NoError(t, err)
require.ElementsMatch(t, c.expectedResults, values) require.ElementsMatch(t, c.expectedResults, values)
}) })
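
The hunks above all apply one mechanical change: the context.Context argument moves from the Querier/ChunkQuerier constructors to the individual query methods (Select, LabelValues, LabelNames, Postings, and so on). A minimal sketch of the resulting call shape, assuming a locally opened TSDB; the "data" directory, the matcher, and the printed output are illustrative, not taken from this diff:

package main

import (
	"context"
	"fmt"
	"math"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	db, err := tsdb.Open("data", nil, nil, tsdb.DefaultOptions(), nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// After this change the constructor takes no context...
	q, err := db.Querier(math.MinInt64, math.MaxInt64)
	if err != nil {
		panic(err)
	}
	defer q.Close()

	// ...and each query method takes one instead, so cancellation and
	// deadlines apply per call rather than per querier.
	ctx := context.Background()
	ss := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "job", "demo"))
	for ss.Next() {
		fmt.Println(ss.At().Labels())
	}
	if err := ss.Err(); err != nil {
		panic(err)
	}
}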

View file

@ -14,6 +14,7 @@
package tsdb package tsdb
import ( import (
"context"
"os" "os"
"path/filepath" "path/filepath"
"testing" "testing"
@ -28,6 +29,7 @@ import (
func TestRepairBadIndexVersion(t *testing.T) { func TestRepairBadIndexVersion(t *testing.T) {
tmpDir := t.TempDir() tmpDir := t.TempDir()
ctx := context.Background()
// The broken index used in this test was written by the following script // The broken index used in this test was written by the following script
// at a broken revision. // at a broken revision.
@ -78,7 +80,7 @@ func TestRepairBadIndexVersion(t *testing.T) {
// Read current index to check integrity. // Read current index to check integrity.
r, err := index.NewFileReader(filepath.Join(tmpDbDir, indexFilename)) r, err := index.NewFileReader(filepath.Join(tmpDbDir, indexFilename))
require.NoError(t, err) require.NoError(t, err)
p, err := r.Postings("b", "1") p, err := r.Postings(ctx, "b", "1")
require.NoError(t, err) require.NoError(t, err)
var builder labels.ScratchBuilder var builder labels.ScratchBuilder
for p.Next() { for p.Next() {
@ -97,7 +99,7 @@ func TestRepairBadIndexVersion(t *testing.T) {
r, err = index.NewFileReader(filepath.Join(tmpDbDir, indexFilename)) r, err = index.NewFileReader(filepath.Join(tmpDbDir, indexFilename))
require.NoError(t, err) require.NoError(t, err)
defer r.Close() defer r.Close()
p, err = r.Postings("b", "1") p, err = r.Postings(ctx, "b", "1")
require.NoError(t, err) require.NoError(t, err)
res := []labels.Labels{} res := []labels.Labels{}

View file

@ -0,0 +1,165 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package annotations
import (
"errors"
"fmt"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/promql/parser/posrange"
)
// Annotations is a general wrapper for warnings and other information
// that is returned by the query API along with the results.
// Each individual annotation is modeled by a Go error.
// They are deduplicated based on the string returned by error.Error().
// The zero value is usable without further initialization, see New().
type Annotations map[string]error
// New returns new Annotations ready to use. Note that the zero value of
// Annotations is also fully usable, but using this method is often more
// readable.
func New() *Annotations {
return &Annotations{}
}
// Add adds an annotation (modeled as a Go error) in-place and returns the
// modified Annotations for convenience.
func (a *Annotations) Add(err error) Annotations {
if *a == nil {
*a = Annotations{}
}
(*a)[err.Error()] = err
return *a
}
// Merge adds the contents of the second Annotations to the first, modifying
// the first in-place, and returns the merged first Annotations for convenience.
func (a *Annotations) Merge(aa Annotations) Annotations {
if *a == nil {
*a = Annotations{}
}
for key, val := range aa {
(*a)[key] = val
}
return *a
}
// AsErrors is a convenience function to return the annotations map as a slice
// of errors.
func (a Annotations) AsErrors() []error {
arr := make([]error, 0, len(a))
for _, err := range a {
arr = append(arr, err)
}
return arr
}
// AsStrings is a convenience function to return the annotations map as a slice
// of strings. The query string is used to get the line number and character offset
// positioning info of the elements that trigger an annotation. maxAnnos limits the
// number of annotations returned (0 means no limit).
func (a Annotations) AsStrings(query string, maxAnnos int) []string {
arr := make([]string, 0, len(a))
for _, err := range a {
if maxAnnos > 0 && len(arr) >= maxAnnos {
break
}
anErr, ok := err.(annoErr)
if ok {
anErr.Query = query
err = anErr
}
arr = append(arr, err.Error())
}
if maxAnnos > 0 && len(a) > maxAnnos {
arr = append(arr, fmt.Sprintf("%d more annotations omitted", len(a)-maxAnnos))
}
return arr
}
//nolint:revive // Ignore ST1012
var (
// Currently there are only two types: warnings and info.
// For now, info annotations are visually identical to warnings, as we have not
// updated the API spec or the frontend to show a different kind of warning. But
// we make the distinction here to prepare for adding them in the future.
PromQLInfo = errors.New("PromQL info")
PromQLWarning = errors.New("PromQL warning")
InvalidQuantileWarning = fmt.Errorf("%w: quantile value should be between 0 and 1", PromQLWarning)
BadBucketLabelWarning = fmt.Errorf("%w: bucket label %q is missing or has a malformed value", PromQLWarning, model.BucketLabel)
MixedFloatsHistogramsWarning = fmt.Errorf("%w: encountered a mix of histograms and floats for metric name", PromQLWarning)
MixedClassicNativeHistogramsWarning = fmt.Errorf("%w: vector contains a mix of classic and native histograms for metric name", PromQLWarning)
PossibleNonCounterInfo = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count:", PromQLInfo)
)
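// annoErr wraps an error with the position range in the query that triggered
// it. The Query string is attached later, by AsStrings, when the position is
// rendered for output.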
type annoErr struct {
PositionRange posrange.PositionRange
Err error
Query string
}
func (e annoErr) Error() string {
return fmt.Sprintf("%s (%s)", e.Err, e.PositionRange.StartPosInput(e.Query, 0))
}
// NewInvalidQuantileWarning is used when the user specifies an invalid quantile
// value, i.e. a float that is outside the range [0, 1] or NaN.
func NewInvalidQuantileWarning(q float64, pos posrange.PositionRange) annoErr {
return annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w, got %g", InvalidQuantileWarning, q),
}
}
// NewBadBucketLabelWarning is used when there is an error parsing the bucket label
// of a classic histogram.
func NewBadBucketLabelWarning(metricName, label string, pos posrange.PositionRange) annoErr {
return annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w of %q for metric name %q", BadBucketLabelWarning, label, metricName),
}
}
// NewMixedFloatsHistogramsWarning is used when the queried series includes both
// float samples and histogram samples for functions that do not support mixed
// samples.
func NewMixedFloatsHistogramsWarning(metricName string, pos posrange.PositionRange) annoErr {
return annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %q", MixedFloatsHistogramsWarning, metricName),
}
}
// NewMixedClassicNativeHistogramsWarning is used when the queried series includes
// both classic and native histograms.
func NewMixedClassicNativeHistogramsWarning(metricName string, pos posrange.PositionRange) annoErr {
return annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %q", MixedClassicNativeHistogramsWarning, metricName),
}
}
// NewPossibleNonCounterInfo is used when a metric that is expected to be a
// counter does not have one of the suffixes _total, _sum, or _count.
func NewPossibleNonCounterInfo(metricName string, pos posrange.PositionRange) annoErr {
return annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %q", PossibleNonCounterInfo, metricName),
}
}
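
For orientation, a minimal usage sketch of the package above; the query string and the position offsets are illustrative assumptions, not taken from this commit. It also demonstrates the deduplication by error string: adding the same annotation twice keeps a single entry.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser/posrange"
	"github.com/prometheus/prometheus/util/annotations"
)

func main() {
	query := "histogram_quantile(1.5, rate(http_requests[5m]))"

	var annos annotations.Annotations // the zero value is usable as-is
	pos := posrange.PositionRange{Start: 19, End: 22} // span of "1.5" (illustrative)

	// Annotations are keyed by Error(), so adding the same one twice
	// keeps a single entry.
	annos.Add(annotations.NewInvalidQuantileWarning(1.5, pos))
	annos.Add(annotations.NewInvalidQuantileWarning(1.5, pos))
	fmt.Println(len(annos)) // 1

	// Merge folds a second set into the first, in place.
	annos.Merge(annotations.New().Add(
		annotations.NewPossibleNonCounterInfo("http_requests", posrange.PositionRange{Start: 29, End: 42}),
	))

	// AsStrings attaches the query so positions can be rendered;
	// 0 means no limit on the number of annotations returned.
	for _, s := range annos.AsStrings(query, 0) {
		fmt.Println(s)
	}
}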

View file

@ -51,6 +51,7 @@ import (
"github.com/prometheus/prometheus/storage/remote" "github.com/prometheus/prometheus/storage/remote"
"github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/index" "github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/util/annotations"
"github.com/prometheus/prometheus/util/httputil" "github.com/prometheus/prometheus/util/httputil"
"github.com/prometheus/prometheus/util/stats" "github.com/prometheus/prometheus/util/stats"
) )
@ -161,7 +162,7 @@ type Response struct {
type apiFuncResult struct { type apiFuncResult struct {
data interface{} data interface{}
err *apiError err *apiError
warnings storage.Warnings warnings annotations.Annotations
finalizer func() finalizer func()
} }
@ -170,7 +171,7 @@ type apiFunc func(r *http.Request) apiFuncResult
// TSDBAdminStats defines the tsdb interfaces used by the v1 API for admin operations as well as statistics. // TSDBAdminStats defines the tsdb interfaces used by the v1 API for admin operations as well as statistics.
type TSDBAdminStats interface { type TSDBAdminStats interface {
CleanTombstones() error CleanTombstones() error
Delete(mint, maxt int64, ms ...*labels.Matcher) error Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Matcher) error
Snapshot(dir string, withHead bool) error Snapshot(dir string, withHead bool) error
Stats(statsByLabelName string, limit int) (*tsdb.Stats, error) Stats(statsByLabelName string, limit int) (*tsdb.Stats, error)
WALReplayStatus() (tsdb.WALReplayStatus, error) WALReplayStatus() (tsdb.WALReplayStatus, error)
@ -337,7 +338,7 @@ func (api *API) Register(r *route.Router) {
} }
if result.data != nil { if result.data != nil {
api.respond(w, r, result.data, result.warnings) api.respond(w, r, result.data, result.warnings, r.FormValue("query"))
return return
} }
w.WriteHeader(http.StatusNoContent) w.WriteHeader(http.StatusNoContent)
@ -659,7 +660,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
} }
q, err := api.Queryable.Querier(r.Context(), timestamp.FromTime(start), timestamp.FromTime(end)) q, err := api.Queryable.Querier(timestamp.FromTime(start), timestamp.FromTime(end))
if err != nil { if err != nil {
return apiFuncResult{nil, returnAPIError(err), nil, nil} return apiFuncResult{nil, returnAPIError(err), nil, nil}
} }
@ -667,18 +668,18 @@ func (api *API) labelNames(r *http.Request) apiFuncResult {
var ( var (
names []string names []string
warnings storage.Warnings warnings annotations.Annotations
) )
if len(matcherSets) > 0 { if len(matcherSets) > 0 {
labelNamesSet := make(map[string]struct{}) labelNamesSet := make(map[string]struct{})
for _, matchers := range matcherSets { for _, matchers := range matcherSets {
vals, callWarnings, err := q.LabelNames(matchers...) vals, callWarnings, err := q.LabelNames(r.Context(), matchers...)
if err != nil { if err != nil {
return apiFuncResult{nil, returnAPIError(err), warnings, nil} return apiFuncResult{nil, returnAPIError(err), warnings, nil}
} }
warnings = append(warnings, callWarnings...) warnings.Merge(callWarnings)
for _, val := range vals { for _, val := range vals {
labelNamesSet[val] = struct{}{} labelNamesSet[val] = struct{}{}
} }
@ -691,7 +692,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult {
} }
slices.Sort(names) slices.Sort(names)
} else { } else {
names, warnings, err = q.LabelNames() names, warnings, err = q.LabelNames(r.Context())
if err != nil { if err != nil {
return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil} return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil}
} }
@ -725,7 +726,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
} }
q, err := api.Queryable.Querier(r.Context(), timestamp.FromTime(start), timestamp.FromTime(end)) q, err := api.Queryable.Querier(timestamp.FromTime(start), timestamp.FromTime(end))
if err != nil { if err != nil {
return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil} return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil}
} }
@ -743,17 +744,17 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
var ( var (
vals []string vals []string
warnings storage.Warnings warnings annotations.Annotations
) )
if len(matcherSets) > 0 { if len(matcherSets) > 0 {
var callWarnings storage.Warnings var callWarnings annotations.Annotations
labelValuesSet := make(map[string]struct{}) labelValuesSet := make(map[string]struct{})
for _, matchers := range matcherSets { for _, matchers := range matcherSets {
vals, callWarnings, err = q.LabelValues(name, matchers...) vals, callWarnings, err = q.LabelValues(ctx, name, matchers...)
if err != nil { if err != nil {
return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer} return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer}
} }
warnings = append(warnings, callWarnings...) warnings.Merge(callWarnings)
for _, val := range vals { for _, val := range vals {
labelValuesSet[val] = struct{}{} labelValuesSet[val] = struct{}{}
} }
@ -764,7 +765,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
vals = append(vals, val) vals = append(vals, val)
} }
} else { } else {
vals, warnings, err = q.LabelValues(name) vals, warnings, err = q.LabelValues(ctx, name)
if err != nil { if err != nil {
return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer} return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer}
} }
@ -793,6 +794,8 @@ var (
) )
func (api *API) series(r *http.Request) (result apiFuncResult) { func (api *API) series(r *http.Request) (result apiFuncResult) {
ctx := r.Context()
if err := r.ParseForm(); err != nil { if err := r.ParseForm(); err != nil {
return apiFuncResult{nil, &apiError{errorBadData, errors.Wrapf(err, "error parsing form values")}, nil, nil} return apiFuncResult{nil, &apiError{errorBadData, errors.Wrapf(err, "error parsing form values")}, nil, nil}
} }
@ -814,7 +817,7 @@ func (api *API) series(r *http.Request) (result apiFuncResult) {
return invalidParamError(err, "match[]") return invalidParamError(err, "match[]")
} }
q, err := api.Queryable.Querier(r.Context(), timestamp.FromTime(start), timestamp.FromTime(end)) q, err := api.Queryable.Querier(timestamp.FromTime(start), timestamp.FromTime(end))
if err != nil { if err != nil {
return apiFuncResult{nil, returnAPIError(err), nil, nil} return apiFuncResult{nil, returnAPIError(err), nil, nil}
} }
@ -841,13 +844,13 @@ func (api *API) series(r *http.Request) (result apiFuncResult) {
var sets []storage.SeriesSet var sets []storage.SeriesSet
for _, mset := range matcherSets { for _, mset := range matcherSets {
// We need to sort these select results to merge (deduplicate) the series sets later. // We need to sort these select results to merge (deduplicate) the series sets later.
s := q.Select(true, hints, mset...) s := q.Select(ctx, true, hints, mset...)
sets = append(sets, s) sets = append(sets, s)
} }
set = storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge) set = storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge)
} else { } else {
// At this point at least one match exists. // At this point at least one match exists.
set = q.Select(false, hints, matcherSets[0]...) set = q.Select(ctx, false, hints, matcherSets[0]...)
} }
metrics := []labels.Labels{} metrics := []labels.Labels{}
@ -1577,7 +1580,7 @@ func (api *API) serveWALReplayStatus(w http.ResponseWriter, r *http.Request) {
Min: status.Min, Min: status.Min,
Max: status.Max, Max: status.Max,
Current: status.Current, Current: status.Current,
}, nil) }, nil, "")
} }
func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) { func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) {
@ -1630,7 +1633,7 @@ func (api *API) deleteSeries(r *http.Request) apiFuncResult {
if err != nil { if err != nil {
return invalidParamError(err, "match[]") return invalidParamError(err, "match[]")
} }
if err := api.db.Delete(timestamp.FromTime(start), timestamp.FromTime(end), matchers...); err != nil { if err := api.db.Delete(r.Context(), timestamp.FromTime(start), timestamp.FromTime(end), matchers...); err != nil {
return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil} return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil}
} }
} }
@ -1683,17 +1686,15 @@ func (api *API) cleanTombstones(*http.Request) apiFuncResult {
return apiFuncResult{nil, nil, nil, nil} return apiFuncResult{nil, nil, nil, nil}
} }
func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface{}, warnings storage.Warnings) { // Query string is needed to get the position information for the annotations, and it
// can be empty if the position information isn't needed.
func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface{}, warnings annotations.Annotations, query string) {
statusMessage := statusSuccess statusMessage := statusSuccess
var warningStrings []string
for _, warning := range warnings {
warningStrings = append(warningStrings, warning.Error())
}
resp := &Response{ resp := &Response{
Status: statusMessage, Status: statusMessage,
Data: data, Data: data,
Warnings: warningStrings, Warnings: warnings.AsStrings(query, 10),
} }
codec, err := api.negotiateCodec(req, resp) codec, err := api.negotiateCodec(req, resp)

View file

@ -993,14 +993,14 @@ func setupRemote(s storage.Storage) *httptest.Server {
} }
} }
querier, err := s.Querier(r.Context(), query.StartTimestampMs, query.EndTimestampMs) querier, err := s.Querier(query.StartTimestampMs, query.EndTimestampMs)
if err != nil { if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)
return return
} }
defer querier.Close() defer querier.Close()
set := querier.Select(false, hints, matchers...) set := querier.Select(r.Context(), false, hints, matchers...)
resp.Results[i], _, err = remote.ToQueryResult(set, 1e6) resp.Results[i], _, err = remote.ToQueryResult(set, 1e6)
if err != nil { if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)
@ -2767,9 +2767,9 @@ type fakeDB struct {
err error err error
} }
func (f *fakeDB) CleanTombstones() error { return f.err } func (f *fakeDB) CleanTombstones() error { return f.err }
func (f *fakeDB) Delete(int64, int64, ...*labels.Matcher) error { return f.err } func (f *fakeDB) Delete(context.Context, int64, int64, ...*labels.Matcher) error { return f.err }
func (f *fakeDB) Snapshot(string, bool) error { return f.err } func (f *fakeDB) Snapshot(string, bool) error { return f.err }
func (f *fakeDB) Stats(statsByLabelName string, limit int) (_ *tsdb.Stats, retErr error) { func (f *fakeDB) Stats(statsByLabelName string, limit int) (_ *tsdb.Stats, retErr error) {
dbDir, err := os.MkdirTemp("", "tsdb-api-ready") dbDir, err := os.MkdirTemp("", "tsdb-api-ready")
if err != nil { if err != nil {
@ -2985,7 +2985,7 @@ func TestRespondSuccess(t *testing.T) {
api.InstallCodec(&testCodec{contentType: MIMEType{"test", "can-encode-2"}, canEncode: true}) api.InstallCodec(&testCodec{contentType: MIMEType{"test", "can-encode-2"}, canEncode: true})
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
api.respond(w, r, "test", nil) api.respond(w, r, "test", nil, "")
})) }))
defer s.Close() defer s.Close()
@ -3074,7 +3074,7 @@ func TestRespondSuccess_DefaultCodecCannotEncodeResponse(t *testing.T) {
api.InstallCodec(&testCodec{contentType: MIMEType{"application", "default-format"}, canEncode: false}) api.InstallCodec(&testCodec{contentType: MIMEType{"application", "default-format"}, canEncode: false})
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
api.respond(w, r, "test", nil) api.respond(w, r, "test", nil, "")
})) }))
defer s.Close() defer s.Close()
@ -3473,7 +3473,7 @@ func BenchmarkRespond(b *testing.B) {
api := API{} api := API{}
api.InstallCodec(JSONCodec{}) api.InstallCodec(JSONCodec{})
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
api.respond(&testResponseWriter, request, c.response, nil) api.respond(&testResponseWriter, request, c.response, nil, "")
} }
}) })
} }

View file

@ -36,6 +36,7 @@ import (
"github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/annotations"
) )
func TestApiStatusCodes(t *testing.T) { func TestApiStatusCodes(t *testing.T) {
@ -154,11 +155,11 @@ func (t errorTestQueryable) ExemplarQuerier(ctx context.Context) (storage.Exempl
return nil, t.err return nil, t.err
} }
func (t errorTestQueryable) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { func (t errorTestQueryable) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) {
return nil, t.err return nil, t.err
} }
func (t errorTestQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { func (t errorTestQueryable) Querier(mint, maxt int64) (storage.Querier, error) {
if t.q != nil { if t.q != nil {
return t.q, nil return t.q, nil
} }
@ -170,11 +171,11 @@ type errorTestQuerier struct {
err error err error
} }
func (t errorTestQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { func (t errorTestQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, t.err return nil, nil, t.err
} }
func (t errorTestQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { func (t errorTestQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, t.err return nil, nil, t.err
} }
@ -182,7 +183,7 @@ func (t errorTestQuerier) Close() error {
return nil return nil
} }
func (t errorTestQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { func (t errorTestQuerier) Select(_ context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
if t.s != nil { if t.s != nil {
return t.s return t.s
} }
@ -205,7 +206,7 @@ func (t errorTestSeriesSet) Err() error {
return t.err return t.err
} }
func (t errorTestSeriesSet) Warnings() storage.Warnings { func (t errorTestSeriesSet) Warnings() annotations.Annotations {
return nil return nil
} }

View file

@ -57,6 +57,8 @@ func (h *Handler) federation(w http.ResponseWriter, req *http.Request) {
h.mtx.RLock() h.mtx.RLock()
defer h.mtx.RUnlock() defer h.mtx.RUnlock()
ctx := req.Context()
if err := req.ParseForm(); err != nil { if err := req.ParseForm(); err != nil {
http.Error(w, fmt.Sprintf("error parsing form values: %v", err), http.StatusBadRequest) http.Error(w, fmt.Sprintf("error parsing form values: %v", err), http.StatusBadRequest)
return return
@ -80,7 +82,7 @@ func (h *Handler) federation(w http.ResponseWriter, req *http.Request) {
) )
w.Header().Set("Content-Type", string(format)) w.Header().Set("Content-Type", string(format))
q, err := h.localStorage.Querier(req.Context(), mint, maxt) q, err := h.localStorage.Querier(mint, maxt)
if err != nil { if err != nil {
federationErrors.Inc() federationErrors.Inc()
if errors.Cause(err) == tsdb.ErrNotReady { if errors.Cause(err) == tsdb.ErrNotReady {
@ -98,7 +100,7 @@ func (h *Handler) federation(w http.ResponseWriter, req *http.Request) {
var sets []storage.SeriesSet var sets []storage.SeriesSet
for _, mset := range matcherSets { for _, mset := range matcherSets {
s := q.Select(true, hints, mset...) s := q.Select(ctx, true, hints, mset...)
sets = append(sets, s) sets = append(sets, s)
} }

View file

@ -237,7 +237,7 @@ type notReadyReadStorage struct {
LocalStorage LocalStorage
} }
func (notReadyReadStorage) Querier(context.Context, int64, int64) (storage.Querier, error) { func (notReadyReadStorage) Querier(int64, int64) (storage.Querier, error) {
return nil, errors.Wrap(tsdb.ErrNotReady, "wrap") return nil, errors.Wrap(tsdb.ErrNotReady, "wrap")
} }

View file

@ -1,6 +1,6 @@
{ {
"name": "@prometheus-io/codemirror-promql", "name": "@prometheus-io/codemirror-promql",
"version": "0.46.0", "version": "0.47.0",
"description": "a CodeMirror mode for the PromQL language", "description": "a CodeMirror mode for the PromQL language",
"types": "dist/esm/index.d.ts", "types": "dist/esm/index.d.ts",
"module": "dist/esm/index.js", "module": "dist/esm/index.js",
@ -29,7 +29,7 @@
}, },
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": { "dependencies": {
"@prometheus-io/lezer-promql": "0.46.0", "@prometheus-io/lezer-promql": "0.47.0",
"lru-cache": "^7.18.3" "lru-cache": "^7.18.3"
}, },
"devDependencies": { "devDependencies": {

View file

@ -1,6 +1,6 @@
{ {
"name": "@prometheus-io/lezer-promql", "name": "@prometheus-io/lezer-promql",
"version": "0.46.0", "version": "0.47.0",
"description": "lezer-based PromQL grammar", "description": "lezer-based PromQL grammar",
"main": "dist/index.cjs", "main": "dist/index.cjs",
"type": "module", "type": "module",

View file

@ -30,10 +30,10 @@
}, },
"module/codemirror-promql": { "module/codemirror-promql": {
"name": "@prometheus-io/codemirror-promql", "name": "@prometheus-io/codemirror-promql",
"version": "0.46.0", "version": "0.47.0",
"license": "Apache-2.0", "license": "Apache-2.0",
"dependencies": { "dependencies": {
"@prometheus-io/lezer-promql": "0.46.0", "@prometheus-io/lezer-promql": "0.47.0",
"lru-cache": "^7.18.3" "lru-cache": "^7.18.3"
}, },
"devDependencies": { "devDependencies": {
@ -70,7 +70,7 @@
}, },
"module/lezer-promql": { "module/lezer-promql": {
"name": "@prometheus-io/lezer-promql", "name": "@prometheus-io/lezer-promql",
"version": "0.46.0", "version": "0.47.0",
"license": "Apache-2.0", "license": "Apache-2.0",
"devDependencies": { "devDependencies": {
"@lezer/generator": "^1.2.3", "@lezer/generator": "^1.2.3",
@ -20770,7 +20770,7 @@
}, },
"react-app": { "react-app": {
"name": "@prometheus-io/app", "name": "@prometheus-io/app",
"version": "0.46.0", "version": "0.47.0",
"dependencies": { "dependencies": {
"@codemirror/autocomplete": "^6.7.1", "@codemirror/autocomplete": "^6.7.1",
"@codemirror/commands": "^6.2.4", "@codemirror/commands": "^6.2.4",
@ -20788,7 +20788,7 @@
"@lezer/lr": "^1.3.6", "@lezer/lr": "^1.3.6",
"@nexucis/fuzzy": "^0.4.1", "@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1", "@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.46.0", "@prometheus-io/codemirror-promql": "0.47.0",
"bootstrap": "^4.6.2", "bootstrap": "^4.6.2",
"css.escape": "^1.5.1", "css.escape": "^1.5.1",
"downshift": "^7.6.0", "downshift": "^7.6.0",
@ -23428,7 +23428,7 @@
"@lezer/lr": "^1.3.6", "@lezer/lr": "^1.3.6",
"@nexucis/fuzzy": "^0.4.1", "@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1", "@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.46.0", "@prometheus-io/codemirror-promql": "0.47.0",
"@testing-library/react-hooks": "^7.0.2", "@testing-library/react-hooks": "^7.0.2",
"@types/enzyme": "^3.10.13", "@types/enzyme": "^3.10.13",
"@types/flot": "0.0.32", "@types/flot": "0.0.32",
@ -23492,7 +23492,7 @@
"@lezer/common": "^1.0.3", "@lezer/common": "^1.0.3",
"@lezer/highlight": "^1.1.6", "@lezer/highlight": "^1.1.6",
"@lezer/lr": "^1.3.6", "@lezer/lr": "^1.3.6",
"@prometheus-io/lezer-promql": "0.46.0", "@prometheus-io/lezer-promql": "0.47.0",
"isomorphic-fetch": "^3.0.0", "isomorphic-fetch": "^3.0.0",
"lru-cache": "^7.18.3", "lru-cache": "^7.18.3",
"nock": "^13.3.1" "nock": "^13.3.1"

View file

@ -1,6 +1,6 @@
{ {
"name": "@prometheus-io/app", "name": "@prometheus-io/app",
"version": "0.46.0", "version": "0.47.0",
"private": true, "private": true,
"dependencies": { "dependencies": {
"@codemirror/autocomplete": "^6.7.1", "@codemirror/autocomplete": "^6.7.1",
@ -19,7 +19,7 @@
"@lezer/lr": "^1.3.6", "@lezer/lr": "^1.3.6",
"@nexucis/fuzzy": "^0.4.1", "@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1", "@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.46.0", "@prometheus-io/codemirror-promql": "0.47.0",
"bootstrap": "^4.6.2", "bootstrap": "^4.6.2",
"css.escape": "^1.5.1", "css.escape": "^1.5.1",
"downshift": "^7.6.0", "downshift": "^7.6.0",

View file

@ -35,12 +35,7 @@ const ScrapePoolContentTable: FC<InfiniteScrollItemsProps<Target>> = ({ items })
<Badge color={getColor(target.health)}>{target.health.toUpperCase()}</Badge> <Badge color={getColor(target.health)}>{target.health.toUpperCase()}</Badge>
</td> </td>
<td className={styles.labels}> <td className={styles.labels}>
<TargetLabels <TargetLabels discoveredLabels={target.discoveredLabels} labels={target.labels} />
discoveredLabels={target.discoveredLabels}
labels={target.labels}
scrapePool={target.scrapePool}
idx={index}
/>
</td> </td>
<td className={styles['last-scrape']}>{formatRelative(target.lastScrape, now())}</td> <td className={styles['last-scrape']}>{formatRelative(target.lastScrape, now())}</td>
<td className={styles['scrape-duration']}> <td className={styles['scrape-duration']}>

View file

@ -1,3 +0,0 @@
.discovered {
white-space: nowrap;
}

View file

@ -17,15 +17,12 @@ describe('targetLabels', () => {
job: 'node_exporter', job: 'node_exporter',
foo: 'bar', foo: 'bar',
}, },
idx: 1,
scrapePool: 'cortex/node-exporter_group/0',
}; };
const targetLabels = shallow(<TargetLabels {...defaultProps} />); const targetLabels = shallow(<TargetLabels {...defaultProps} />);
it('renders a div of series labels', () => { it('renders a div of series labels', () => {
const div = targetLabels.find('div').filterWhere((elem) => elem.hasClass('series-labels-container')); const div = targetLabels.find('div').filterWhere((elem) => elem.hasClass('series-labels-container'));
expect(div).toHaveLength(1); expect(div).toHaveLength(1);
expect(div.prop('id')).toEqual('series-labels-cortex/node-exporter_group/0-1');
}); });
it('wraps each label in a label badge', () => { it('wraps each label in a label badge', () => {
@ -38,15 +35,4 @@ describe('targetLabels', () => {
}); });
expect(targetLabels.find(Badge)).toHaveLength(3); expect(targetLabels.find(Badge)).toHaveLength(3);
}); });
it('renders a tooltip for discovered labels', () => {
const tooltip = targetLabels.find(Tooltip);
expect(tooltip).toHaveLength(1);
expect(tooltip.prop('isOpen')).toBe(false);
expect(tooltip.prop('target')).toEqual('series-labels-cortex\\/node-exporter_group\\/0-1');
});
it('renders discovered labels', () => {
expect(toJson(targetLabels)).toMatchSnapshot();
});
}); });

View file

@ -1,7 +1,7 @@
import React, { FC, Fragment, useState } from 'react'; import { faChevronDown, faChevronUp } from '@fortawesome/free-solid-svg-icons';
import { Badge, Tooltip } from 'reactstrap'; import { FontAwesomeIcon } from '@fortawesome/react-fontawesome';
import 'css.escape'; import React, { FC, useState } from 'react';
import styles from './TargetLabels.module.css'; import { Badge, Button } from 'reactstrap';
interface Labels { interface Labels {
[key: string]: string; [key: string]: string;
@ -10,21 +10,14 @@ interface Labels {
export interface TargetLabelsProps { export interface TargetLabelsProps {
discoveredLabels: Labels; discoveredLabels: Labels;
labels: Labels; labels: Labels;
idx: number;
scrapePool: string;
} }
const formatLabels = (labels: Labels): string[] => Object.keys(labels).map((key) => `${key}="${labels[key]}"`); const TargetLabels: FC<TargetLabelsProps> = ({ discoveredLabels, labels }) => {
const [showDiscovered, setShowDiscovered] = useState(false);
const TargetLabels: FC<TargetLabelsProps> = ({ discoveredLabels, labels, idx, scrapePool }) => {
const [tooltipOpen, setTooltipOpen] = useState(false);
const toggle = (): void => setTooltipOpen(!tooltipOpen);
const id = `series-labels-${scrapePool}-${idx}`;
return ( return (
<> <>
<div id={id} className="series-labels-container"> <div className="series-labels-container">
{Object.keys(labels).map((labelName) => { {Object.keys(labels).map((labelName) => {
return ( return (
<Badge color="primary" className="mr-1" key={labelName}> <Badge color="primary" className="mr-1" key={labelName}>
@ -32,22 +25,28 @@ const TargetLabels: FC<TargetLabelsProps> = ({ discoveredLabels, labels, idx, sc
</Badge> </Badge>
); );
})} })}
<Button
size="sm"
color="link"
title={`${showDiscovered ? 'Hide' : 'Show'} discovered (pre-relabeling) labels`}
onClick={() => setShowDiscovered(!showDiscovered)}
style={{ fontSize: '0.8rem' }}
>
<FontAwesomeIcon icon={showDiscovered ? faChevronUp : faChevronDown} />
</Button>
</div> </div>
<Tooltip {showDiscovered && (
isOpen={tooltipOpen} <>
target={CSS.escape(id)} <div className="mt-3 font-weight-bold">Discovered labels:</div>
toggle={toggle} {Object.keys(discoveredLabels).map((labelName) => (
placement={'right-end'} <div key={labelName}>
style={{ maxWidth: 'none', textAlign: 'left' }} <Badge color="info" className="mr-1">
> {`${labelName}="${discoveredLabels[labelName]}"`}
<b>Before relabeling:</b> </Badge>
{formatLabels(discoveredLabels).map((s: string, labelIndex: number) => ( </div>
<Fragment key={labelIndex}> ))}
<br /> </>
<span className={styles.discovered}>{s}</span> )}
</Fragment>
))}
</Tooltip>
</> </>
); );
}; };

Some files were not shown because too many files have changed in this diff.