2017-04-10 11:59:45 -07:00
|
|
|
// Copyright 2017 The Prometheus Authors
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2016-12-10 09:08:50 -08:00
|
|
|
package tsdb
|
|
|
|
|
2016-12-13 06:26:58 -08:00
|
|
|
import (
|
2023-09-12 03:37:38 -07:00
|
|
|
"context"
|
2023-11-16 10:54:41 -08:00
|
|
|
"errors"
|
2021-11-28 23:54:23 -08:00
|
|
|
"fmt"
|
2020-08-03 03:32:56 -07:00
|
|
|
"math"
|
2016-12-14 06:39:23 -08:00
|
|
|
"strings"
|
2019-05-27 04:24:46 -07:00
|
|
|
"unicode/utf8"
|
2016-12-13 06:26:58 -08:00
|
|
|
|
2022-11-28 00:12:54 -08:00
|
|
|
"github.com/oklog/ulid"
|
2023-08-29 02:03:27 -07:00
|
|
|
"golang.org/x/exp/slices"
|
2020-10-22 02:00:08 -07:00
|
|
|
|
Style cleanup of all the changes in sparsehistogram so far
A lot of this code was hacked together, literally during a
hackathon. This commit intends not to change the code substantially,
but just make the code obey the usual style practices.
A (possibly incomplete) list of areas:
* Generally address linter warnings.
* The `pgk` directory is deprecated as per dev-summit. No new packages should
be added to it. I moved the new `pkg/histogram` package to `model`
anticipating what's proposed in #9478.
* Make the naming of the Sparse Histogram more consistent. Including
abbreviations, there were just too many names for it: SparseHistogram,
Histogram, Histo, hist, his, shs, h. The idea is to call it "Histogram" in
general. Only add "Sparse" if it is needed to avoid confusion with
conventional Histograms (which is rare because the TSDB really has no notion
of conventional Histograms). Use abbreviations only in local scope, and then
really abbreviate (not just removing three out of seven letters like in
"Histo"). This is in the spirit of
https://github.com/golang/go/wiki/CodeReviewComments#variable-names
* Several other minor name changes.
* A lot of formatting of doc comments. For one, following
https://github.com/golang/go/wiki/CodeReviewComments#comment-sentences
, but also layout question, anticipating how things will look like
when rendered by `godoc` (even where `godoc` doesn't render them
right now because they are for unexported types or not a doc comment
at all but just a normal code comment - consistency is queen!).
* Re-enabled `TestQueryLog` and `TestEndopints` (they pass now,
leaving them disabled was presumably an oversight).
* Bucket iterator for histogram.Histogram is now created with a
method.
* HistogramChunk.iterator now allows iterator recycling. (I think
@dieterbe only commented it out because he was confused by the
question in the comment.)
* HistogramAppender.Append panics now because we decided to treat
staleness marker differently.
Signed-off-by: beorn7 <beorn@grafana.com>
2021-10-09 06:57:07 -07:00
|
|
|
"github.com/prometheus/prometheus/model/histogram"
|
2021-11-08 06:23:17 -08:00
|
|
|
"github.com/prometheus/prometheus/model/labels"
|
2020-02-06 07:58:38 -08:00
|
|
|
"github.com/prometheus/prometheus/storage"
|
2019-08-13 01:34:14 -07:00
|
|
|
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
2020-08-03 03:32:56 -07:00
|
|
|
"github.com/prometheus/prometheus/tsdb/chunks"
|
2019-08-13 01:34:14 -07:00
|
|
|
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
|
|
|
|
"github.com/prometheus/prometheus/tsdb/index"
|
2019-09-19 02:15:41 -07:00
|
|
|
"github.com/prometheus/prometheus/tsdb/tombstones"
|
2023-09-14 09:57:31 -07:00
|
|
|
"github.com/prometheus/prometheus/util/annotations"
|
2016-12-13 06:26:58 -08:00
|
|
|
)
|
2016-12-12 10:12:55 -08:00
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
// regexMetaCharacterBytes is a bitmap recording which ASCII bytes are regex
// meta characters that need escaping. For a byte b, bit (b/16) of entry
// (b%16) is set when b is a meta character. See isRegexMetaCharacter.
var regexMetaCharacterBytes [16]byte

// isRegexMetaCharacter reports whether byte b needs to be escaped.
func isRegexMetaCharacter(b byte) bool {
	// Only ASCII bytes are tracked in the bitmap.
	if b >= utf8.RuneSelf {
		return false
	}
	return regexMetaCharacterBytes[b%16]&(1<<(b/16)) != 0
}

func init() {
	// Record every regex meta character in the bitmap.
	const metaCharacters = `.+*?()|[]{}^$`
	for i := 0; i < len(metaCharacters); i++ {
		b := metaCharacters[i]
		regexMetaCharacterBytes[b%16] |= 1 << (b / 16)
	}
}
|
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
// blockBaseQuerier is the shared base of blockQuerier and blockChunkQuerier.
// It bundles the readers of a single block together with the requested time
// range and owns their lifecycle via Close.
type blockBaseQuerier struct {
	// blockID is the ULID of the block being queried.
	blockID ulid.ULID
	// index, chunks and tombstones are the block's readers; all three are
	// closed together in Close.
	index IndexReader

	chunks ChunkReader

	tombstones tombstones.Reader

	// closed is set by Close to reject a second Close call.
	closed bool

	// mint and maxt bound the time range this querier serves.
	mint, maxt int64
}
|
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
func newBlockBaseQuerier(b BlockReader, mint, maxt int64) (*blockBaseQuerier, error) {
|
2020-03-25 12:13:47 -07:00
|
|
|
indexr, err := b.Index()
|
2017-10-09 06:21:46 -07:00
|
|
|
if err != nil {
|
2023-11-16 10:54:41 -08:00
|
|
|
return nil, fmt.Errorf("open index reader: %w", err)
|
2017-08-25 01:32:54 -07:00
|
|
|
}
|
2017-10-09 06:21:46 -07:00
|
|
|
chunkr, err := b.Chunks()
|
|
|
|
if err != nil {
|
2017-10-23 11:30:03 -07:00
|
|
|
indexr.Close()
|
2023-11-16 10:54:41 -08:00
|
|
|
return nil, fmt.Errorf("open chunk reader: %w", err)
|
2017-10-09 06:21:46 -07:00
|
|
|
}
|
|
|
|
tombsr, err := b.Tombstones()
|
|
|
|
if err != nil {
|
2017-10-23 11:30:03 -07:00
|
|
|
indexr.Close()
|
|
|
|
chunkr.Close()
|
2023-11-16 10:54:41 -08:00
|
|
|
return nil, fmt.Errorf("open tombstone reader: %w", err)
|
2017-10-09 06:21:46 -07:00
|
|
|
}
|
2020-07-31 08:03:02 -07:00
|
|
|
|
|
|
|
if tombsr == nil {
|
|
|
|
tombsr = tombstones.NewMemTombstones()
|
|
|
|
}
|
|
|
|
return &blockBaseQuerier{
|
2022-11-28 00:12:54 -08:00
|
|
|
blockID: b.Meta().ULID,
|
2017-10-09 06:21:46 -07:00
|
|
|
mint: mint,
|
|
|
|
maxt: maxt,
|
|
|
|
index: indexr,
|
|
|
|
chunks: chunkr,
|
|
|
|
tombstones: tombsr,
|
|
|
|
}, nil
|
2017-08-25 01:32:54 -07:00
|
|
|
}
|
|
|
|
|
2023-09-14 09:57:31 -07:00
|
|
|
func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
|
2023-09-14 07:02:04 -07:00
|
|
|
res, err := q.index.SortedLabelValues(ctx, name, matchers...)
|
2020-07-31 08:03:02 -07:00
|
|
|
return res, nil, err
|
|
|
|
}
|
2016-12-14 09:38:46 -08:00
|
|
|
|
2023-09-14 09:57:31 -07:00
|
|
|
func (q *blockBaseQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
|
2023-09-14 01:39:51 -07:00
|
|
|
res, err := q.index.LabelNames(ctx, matchers...)
|
2020-07-31 08:03:02 -07:00
|
|
|
return res, nil, err
|
|
|
|
}
|
2019-04-30 00:17:07 -07:00
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
func (q *blockBaseQuerier) Close() error {
|
|
|
|
if q.closed {
|
|
|
|
return errors.New("block querier already closed")
|
|
|
|
}
|
2020-10-28 08:24:58 -07:00
|
|
|
|
|
|
|
errs := tsdb_errors.NewMulti(
|
|
|
|
q.index.Close(),
|
|
|
|
q.chunks.Close(),
|
|
|
|
q.tombstones.Close(),
|
|
|
|
)
|
2020-07-31 08:03:02 -07:00
|
|
|
q.closed = true
|
2020-10-28 08:24:58 -07:00
|
|
|
return errs.Err()
|
2016-12-14 09:38:46 -08:00
|
|
|
}
|
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
// blockQuerier provides series querying access to a single block database.
type blockQuerier struct {
	// Embeds the shared readers, time range and Close/Label* behavior.
	*blockBaseQuerier
}
|
2020-02-06 07:58:38 -08:00
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
// NewBlockQuerier returns a querier against the block reader and requested min and max time range.
|
|
|
|
func NewBlockQuerier(b BlockReader, mint, maxt int64) (storage.Querier, error) {
|
|
|
|
q, err := newBlockBaseQuerier(b, mint, maxt)
|
2020-01-17 03:21:44 -08:00
|
|
|
if err != nil {
|
2020-07-31 08:03:02 -07:00
|
|
|
return nil, err
|
2020-02-06 07:58:38 -08:00
|
|
|
}
|
2020-07-31 08:03:02 -07:00
|
|
|
return &blockQuerier{blockBaseQuerier: q}, nil
|
|
|
|
}
|
2020-02-06 07:58:38 -08:00
|
|
|
|
2023-05-10 19:53:35 -07:00
|
|
|
func (q *blockQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.SeriesSet {
|
2020-02-06 07:58:38 -08:00
|
|
|
mint := q.mint
|
|
|
|
maxt := q.maxt
|
2021-11-03 03:08:34 -07:00
|
|
|
disableTrimming := false
|
2024-01-29 03:57:27 -08:00
|
|
|
sharded := hints != nil && hints.ShardCount > 0
|
2021-11-03 03:08:34 -07:00
|
|
|
|
2023-05-10 19:53:35 -07:00
|
|
|
p, err := PostingsForMatchers(ctx, q.index, ms...)
|
2020-07-31 08:03:02 -07:00
|
|
|
if err != nil {
|
|
|
|
return storage.ErrSeriesSet(err)
|
2020-06-09 09:57:31 -07:00
|
|
|
}
|
2024-01-29 03:57:27 -08:00
|
|
|
if sharded {
|
|
|
|
p = q.index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount)
|
|
|
|
}
|
2020-07-31 08:03:02 -07:00
|
|
|
if sortSeries {
|
|
|
|
p = q.index.SortedPostings(p)
|
|
|
|
}
|
2020-10-14 02:06:17 -07:00
|
|
|
|
|
|
|
if hints != nil {
|
|
|
|
mint = hints.Start
|
|
|
|
maxt = hints.End
|
2021-11-03 03:08:34 -07:00
|
|
|
disableTrimming = hints.DisableTrimming
|
2020-10-14 02:06:17 -07:00
|
|
|
if hints.Func == "series" {
|
|
|
|
// When you're only looking up metadata (for example series API), you don't need to load any chunks.
|
2021-11-03 03:08:34 -07:00
|
|
|
return newBlockSeriesSet(q.index, newNopChunkReader(), q.tombstones, p, mint, maxt, disableTrimming)
|
2020-10-14 02:06:17 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-11-03 03:08:34 -07:00
|
|
|
return newBlockSeriesSet(q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming)
|
2017-05-13 08:43:25 -07:00
|
|
|
}
|
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
// blockChunkQuerier provides chunk querying access to a single block database.
type blockChunkQuerier struct {
	// Embeds the shared readers, time range and Close/Label* behavior.
	*blockBaseQuerier
}
|
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
// NewBlockChunkQuerier returns a chunk querier against the block reader and requested min and max time range.
|
|
|
|
func NewBlockChunkQuerier(b BlockReader, mint, maxt int64) (storage.ChunkQuerier, error) {
|
|
|
|
q, err := newBlockBaseQuerier(b, mint, maxt)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2019-04-30 00:17:07 -07:00
|
|
|
}
|
2020-07-31 08:03:02 -07:00
|
|
|
return &blockChunkQuerier{blockBaseQuerier: q}, nil
|
2019-05-27 04:24:46 -07:00
|
|
|
}
|
|
|
|
|
2023-05-10 19:53:35 -07:00
|
|
|
func (q *blockChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.ChunkSeriesSet {
|
2020-07-31 08:03:02 -07:00
|
|
|
mint := q.mint
|
|
|
|
maxt := q.maxt
|
2021-11-03 03:08:34 -07:00
|
|
|
disableTrimming := false
|
2024-01-29 03:57:27 -08:00
|
|
|
sharded := hints != nil && hints.ShardCount > 0
|
|
|
|
|
2020-07-31 08:03:02 -07:00
|
|
|
if hints != nil {
|
|
|
|
mint = hints.Start
|
|
|
|
maxt = hints.End
|
2021-11-03 03:08:34 -07:00
|
|
|
disableTrimming = hints.DisableTrimming
|
2019-05-27 04:24:46 -07:00
|
|
|
}
|
2023-05-10 19:53:35 -07:00
|
|
|
p, err := PostingsForMatchers(ctx, q.index, ms...)
|
2020-07-31 08:03:02 -07:00
|
|
|
if err != nil {
|
|
|
|
return storage.ErrChunkSeriesSet(err)
|
|
|
|
}
|
2024-01-29 03:57:27 -08:00
|
|
|
if sharded {
|
|
|
|
p = q.index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount)
|
|
|
|
}
|
2020-07-31 08:03:02 -07:00
|
|
|
if sortSeries {
|
|
|
|
p = q.index.SortedPostings(p)
|
|
|
|
}
|
2023-04-03 23:31:49 -07:00
|
|
|
return NewBlockChunkSeriesSet(q.blockID, q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming)
|
2019-05-27 04:24:46 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
func findSetMatches(pattern string) []string {
|
|
|
|
// Return empty matches if the wrapper from Prometheus is missing.
|
|
|
|
if len(pattern) < 6 || pattern[:4] != "^(?:" || pattern[len(pattern)-2:] != ")$" {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
escaped := false
|
2019-08-13 01:34:14 -07:00
|
|
|
sets := []*strings.Builder{{}}
|
2023-05-30 04:49:22 -07:00
|
|
|
init := 4
|
|
|
|
end := len(pattern) - 2
|
|
|
|
// If the regex is wrapped in a group we can remove the first and last parentheses
|
|
|
|
if pattern[init] == '(' && pattern[end-1] == ')' {
|
|
|
|
init++
|
|
|
|
end--
|
|
|
|
}
|
|
|
|
for i := init; i < end; i++ {
|
2019-05-27 04:24:46 -07:00
|
|
|
if escaped {
|
|
|
|
switch {
|
|
|
|
case isRegexMetaCharacter(pattern[i]):
|
|
|
|
sets[len(sets)-1].WriteByte(pattern[i])
|
|
|
|
case pattern[i] == '\\':
|
|
|
|
sets[len(sets)-1].WriteByte('\\')
|
|
|
|
default:
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
escaped = false
|
|
|
|
} else {
|
|
|
|
switch {
|
|
|
|
case isRegexMetaCharacter(pattern[i]):
|
|
|
|
if pattern[i] == '|' {
|
|
|
|
sets = append(sets, &strings.Builder{})
|
|
|
|
} else {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
case pattern[i] == '\\':
|
|
|
|
escaped = true
|
|
|
|
default:
|
|
|
|
sets[len(sets)-1].WriteByte(pattern[i])
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
matches := make([]string, 0, len(sets))
|
|
|
|
for _, s := range sets {
|
|
|
|
if s.Len() > 0 {
|
|
|
|
matches = append(matches, s.String())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return matches
|
|
|
|
}
|
|
|
|
|
2017-11-13 04:57:10 -08:00
|
|
|
// PostingsForMatchers assembles a single postings iterator against the index reader
|
2020-01-17 03:21:44 -08:00
|
|
|
// based on the given matchers. The resulting postings are not ordered by series.
|
2023-05-10 19:53:35 -07:00
|
|
|
func PostingsForMatchers(ctx context.Context, ix IndexReader, ms ...*labels.Matcher) (index.Postings, error) {
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
var its, notIts []index.Postings
|
|
|
|
// See which label must be non-empty.
|
2019-09-13 08:10:35 -07:00
|
|
|
// Optimization for case like {l=~".", l!="1"}.
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
labelMustBeSet := make(map[string]bool, len(ms))
|
|
|
|
for _, m := range ms {
|
|
|
|
if !m.Matches("") {
|
2019-11-18 11:53:33 -08:00
|
|
|
labelMustBeSet[m.Name] = true
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
}
|
|
|
|
}
|
2023-08-29 02:03:27 -07:00
|
|
|
isSubtractingMatcher := func(m *labels.Matcher) bool {
|
|
|
|
if !labelMustBeSet[m.Name] {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
return (m.Type == labels.MatchNotEqual || m.Type == labels.MatchNotRegexp) && m.Matches("")
|
|
|
|
}
|
|
|
|
hasSubtractingMatchers, hasIntersectingMatchers := false, false
|
|
|
|
for _, m := range ms {
|
|
|
|
if isSubtractingMatcher(m) {
|
|
|
|
hasSubtractingMatchers = true
|
|
|
|
} else {
|
|
|
|
hasIntersectingMatchers = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if hasSubtractingMatchers && !hasIntersectingMatchers {
|
|
|
|
// If there's nothing to subtract from, add in everything and remove the notIts later.
|
|
|
|
// We prefer to get AllPostings so that the base of subtraction (i.e. allPostings)
|
|
|
|
// doesn't include series that may be added to the index reader during this function call.
|
|
|
|
k, v := index.AllPostingsKey()
|
2023-05-10 19:53:35 -07:00
|
|
|
allPostings, err := ix.Postings(ctx, k, v)
|
2023-08-29 02:03:27 -07:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
its = append(its, allPostings)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Sort matchers to have the intersecting matchers first.
|
|
|
|
// This way the base for subtraction is smaller and
|
|
|
|
// there is no chance that the set we subtract from
|
|
|
|
// contains postings of series that didn't exist when
|
|
|
|
// we constructed the set we subtract by.
|
2023-09-21 13:53:51 -07:00
|
|
|
slices.SortStableFunc(ms, func(i, j *labels.Matcher) int {
|
|
|
|
if !isSubtractingMatcher(i) && isSubtractingMatcher(j) {
|
|
|
|
return -1
|
|
|
|
}
|
|
|
|
|
|
|
|
return +1
|
2023-08-29 02:03:27 -07:00
|
|
|
})
|
2017-11-30 06:34:49 -08:00
|
|
|
|
2017-05-13 08:43:25 -07:00
|
|
|
for _, m := range ms {
|
2023-05-10 19:53:35 -07:00
|
|
|
if ctx.Err() != nil {
|
|
|
|
return nil, ctx.Err()
|
|
|
|
}
|
2023-04-09 00:08:40 -07:00
|
|
|
switch {
|
|
|
|
case m.Name == "" && m.Value == "": // Special-case for AllPostings, used in tests at least.
|
2023-01-05 06:05:29 -08:00
|
|
|
k, v := index.AllPostingsKey()
|
2023-05-10 19:53:35 -07:00
|
|
|
allPostings, err := ix.Postings(ctx, k, v)
|
2023-01-05 06:05:29 -08:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
its = append(its, allPostings)
|
2023-04-09 00:08:40 -07:00
|
|
|
case labelMustBeSet[m.Name]:
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
// If this matcher must be non-empty, we can be smarter.
|
2019-09-13 08:10:35 -07:00
|
|
|
matchesEmpty := m.Matches("")
|
2019-11-18 11:53:33 -08:00
|
|
|
isNot := m.Type == labels.MatchNotEqual || m.Type == labels.MatchNotRegexp
|
2023-04-09 00:08:40 -07:00
|
|
|
switch {
|
|
|
|
case isNot && matchesEmpty: // l!="foo"
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
// If the label can't be empty and is a Not and the inner matcher
|
|
|
|
// doesn't match empty, then subtract it out at the end.
|
2019-11-18 11:53:33 -08:00
|
|
|
inverse, err := m.Inverse()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2023-05-10 19:53:35 -07:00
|
|
|
it, err := postingsForMatcher(ctx, ix, inverse)
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
notIts = append(notIts, it)
|
2023-04-09 00:08:40 -07:00
|
|
|
case isNot && !matchesEmpty: // l!=""
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
// If the label can't be empty and is a Not, but the inner matcher can
|
|
|
|
// be empty we need to use inversePostingsForMatcher.
|
2019-11-18 11:53:33 -08:00
|
|
|
inverse, err := m.Inverse()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2023-05-10 19:53:35 -07:00
|
|
|
it, err := inversePostingsForMatcher(ctx, ix, inverse)
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2023-01-10 01:51:49 -08:00
|
|
|
if index.IsEmptyPostingsType(it) {
|
|
|
|
return index.EmptyPostings(), nil
|
|
|
|
}
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
its = append(its, it)
|
2023-04-09 00:08:40 -07:00
|
|
|
default: // l="a"
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
// Non-Not matcher, use normal postingsForMatcher.
|
2023-05-10 19:53:35 -07:00
|
|
|
it, err := postingsForMatcher(ctx, ix, m)
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2023-01-10 01:51:49 -08:00
|
|
|
if index.IsEmptyPostingsType(it) {
|
|
|
|
return index.EmptyPostings(), nil
|
|
|
|
}
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
its = append(its, it)
|
|
|
|
}
|
2023-04-09 00:08:40 -07:00
|
|
|
default: // l=""
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
// If the matchers for a labelname selects an empty value, it selects all
|
|
|
|
// the series which don't have the label name set too. See:
|
|
|
|
// https://github.com/prometheus/prometheus/issues/3575 and
|
|
|
|
// https://github.com/prometheus/prometheus/pull/3578#issuecomment-351653555
|
2023-05-10 19:53:35 -07:00
|
|
|
it, err := inversePostingsForMatcher(ctx, ix, m)
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
notIts = append(notIts, it)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
it := index.Intersect(its...)
|
|
|
|
|
|
|
|
for _, n := range notIts {
|
|
|
|
it = index.Without(it, n)
|
|
|
|
}
|
|
|
|
|
2020-01-17 03:21:44 -08:00
|
|
|
return it, nil
|
2017-05-13 08:43:25 -07:00
|
|
|
}
|
|
|
|
|
2023-05-10 19:53:35 -07:00
|
|
|
func postingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Matcher) (index.Postings, error) {
|
Be smarter in how we look at matchers. (#572)
* Add unittests for PostingsForMatcher.
* Selector methods are all stateless, don't need a reference.
* Be smarter in how we look at matchers.
Look at all matchers to see if a label can be empty.
Optimise Not handling, so i!="2" is a simple lookup
rather than an inverse postings list.
All all the Withouts together, rather than
having to subtract each from all postings.
Change the pre-expand the postings logic to always do it before doing a
Without only. Don't do that if it's already a list.
The initial goal here was that the oft-seen pattern
i=~"something.+",i!="foo",i!="bar" becomes more efficient.
benchmark old ns/op new ns/op delta
BenchmarkHeadPostingForMatchers/n="1"-4 5888 6160 +4.62%
BenchmarkHeadPostingForMatchers/n="1",j="foo"-4 7190 6640 -7.65%
BenchmarkHeadPostingForMatchers/j="foo",n="1"-4 6038 5923 -1.90%
BenchmarkHeadPostingForMatchers/n="1",j!="foo"-4 6030884 4850525 -19.57%
BenchmarkHeadPostingForMatchers/i=~".*"-4 887377940 230329137 -74.04%
BenchmarkHeadPostingForMatchers/i=~".+"-4 490316101 319931758 -34.75%
BenchmarkHeadPostingForMatchers/i=~""-4 594961991 130279313 -78.10%
BenchmarkHeadPostingForMatchers/i!=""-4 537542388 318751015 -40.70%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",j="foo"-4 10460243 8565195 -18.12%
BenchmarkHeadPostingForMatchers/n="1",i=~".*",i!="2",j="foo"-4 44964267 8561546 -80.96%
BenchmarkHeadPostingForMatchers/n="1",i!="",j="foo"-4 42244885 29137737 -31.03%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",j="foo"-4 35285834 32774584 -7.12%
BenchmarkHeadPostingForMatchers/n="1",i=~"1.+",j="foo"-4 8951047 8379024 -6.39%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!="2",j="foo"-4 63813335 30672688 -51.93%
BenchmarkHeadPostingForMatchers/n="1",i=~".+",i!~"2.*",j="foo"-4 45381112 44924397 -1.01%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2019-04-09 03:59:45 -07:00
|
|
|
// This method will not return postings for missing labels.
|
2017-12-17 10:08:21 -08:00
|
|
|
|
2017-04-05 05:14:30 -07:00
|
|
|
// Fast-path for equal matching.
|
2019-11-18 11:53:33 -08:00
|
|
|
if m.Type == labels.MatchEqual {
|
2023-05-10 19:53:35 -07:00
|
|
|
return ix.Postings(ctx, m.Name, m.Value)
|
2017-04-05 05:14:30 -07:00
|
|
|
}
|
|
|
|
|
2019-05-27 04:24:46 -07:00
|
|
|
// Fast-path for set matching.
|
2019-11-18 11:53:33 -08:00
|
|
|
if m.Type == labels.MatchRegexp {
|
2020-02-05 02:53:12 -08:00
|
|
|
setMatches := findSetMatches(m.GetRegexString())
|
2019-05-27 04:24:46 -07:00
|
|
|
if len(setMatches) > 0 {
|
2023-05-10 19:53:35 -07:00
|
|
|
return ix.Postings(ctx, m.Name, setMatches...)
|
2019-05-27 04:24:46 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-05-10 19:53:35 -07:00
|
|
|
vals, err := ix.LabelValues(ctx, m.Name)
|
2016-12-14 09:38:46 -08:00
|
|
|
if err != nil {
|
2017-11-13 03:16:58 -08:00
|
|
|
return nil, err
|
2016-12-14 09:38:46 -08:00
|
|
|
}
|
2017-05-13 08:43:25 -07:00
|
|
|
|
2016-12-14 09:38:46 -08:00
|
|
|
var res []string
|
Replace StringTuples with []string
Benchmarks show slight cpu/allocs improvements.
benchmark old ns/op new ns/op delta
BenchmarkPostingsForMatchers/Head/n="1"-4 269978625 235305110 -12.84%
BenchmarkPostingsForMatchers/Head/n="1",j="foo"-4 129739974 121646193 -6.24%
BenchmarkPostingsForMatchers/Head/j="foo",n="1"-4 123826274 122056253 -1.43%
BenchmarkPostingsForMatchers/Head/n="1",j!="foo"-4 126962188 130038235 +2.42%
BenchmarkPostingsForMatchers/Head/i=~".*"-4 6423653989 5991126455 -6.73%
BenchmarkPostingsForMatchers/Head/i=~".+"-4 6934647521 7033370634 +1.42%
BenchmarkPostingsForMatchers/Head/i=~""-4 1177781285 1121497736 -4.78%
BenchmarkPostingsForMatchers/Head/i!=""-4 7033680256 7246094991 +3.02%
BenchmarkPostingsForMatchers/Head/n="1",i=~".*",j="foo"-4 293702332 287440212 -2.13%
BenchmarkPostingsForMatchers/Head/n="1",i=~".*",i!="2",j="foo"-4 307628268 307039964 -0.19%
BenchmarkPostingsForMatchers/Head/n="1",i!=""-4 512247746 480003862 -6.29%
BenchmarkPostingsForMatchers/Head/n="1",i!="",j="foo"-4 361199794 367066917 +1.62%
BenchmarkPostingsForMatchers/Head/n="1",i=~".+",j="foo"-4 478863761 476037784 -0.59%
BenchmarkPostingsForMatchers/Head/n="1",i=~"1.+",j="foo"-4 103394659 102902098 -0.48%
BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!="2",j="foo"-4 482552781 475453903 -1.47%
BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!~"2.*",j="foo"-4 559257389 589297047 +5.37%
BenchmarkPostingsForMatchers/Block/n="1"-4 36492 37012 +1.42%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 557788 611903 +9.70%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 554443 573814 +3.49%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 553227 553826 +0.11%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 113855090 111707221 -1.89%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 133994674 136520728 +1.89%
BenchmarkPostingsForMatchers/Block/i=~""-4 38138091 36299898 -4.82%
BenchmarkPostingsForMatchers/Block/i!=""-4 28861213 27396723 -5.07%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 112699941 110853868 -1.64%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 113198026 111389742 -1.60%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 28994069 27363804 -5.62%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 29709406 28589223 -3.77%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 134695119 135736971 +0.77%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 26783286 25826928 -3.57%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 134733254 134116739 -0.46%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 160713937 158802768 -1.19%
benchmark old allocs new allocs delta
BenchmarkPostingsForMatchers/Head/n="1"-4 36 36 +0.00%
BenchmarkPostingsForMatchers/Head/n="1",j="foo"-4 38 38 +0.00%
BenchmarkPostingsForMatchers/Head/j="foo",n="1"-4 38 38 +0.00%
BenchmarkPostingsForMatchers/Head/n="1",j!="foo"-4 42 40 -4.76%
BenchmarkPostingsForMatchers/Head/i=~".*"-4 61 59 -3.28%
BenchmarkPostingsForMatchers/Head/i=~".+"-4 100088 100087 -0.00%
BenchmarkPostingsForMatchers/Head/i=~""-4 100053 100051 -0.00%
BenchmarkPostingsForMatchers/Head/i!=""-4 100087 100085 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i=~".*",j="foo"-4 44 42 -4.55%
BenchmarkPostingsForMatchers/Head/n="1",i=~".*",i!="2",j="foo"-4 50 48 -4.00%
BenchmarkPostingsForMatchers/Head/n="1",i!=""-4 100076 100074 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i!="",j="foo"-4 100077 100075 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i=~".+",j="foo"-4 100077 100074 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i=~"1.+",j="foo"-4 11167 11165 -0.02%
BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!="2",j="foo"-4 100082 100080 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!~"2.*",j="foo"-4 111265 111261 -0.00%
BenchmarkPostingsForMatchers/Block/n="1"-4 6 6 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 11 11 +0.00%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 11 11 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 15 13 -13.33%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 12 10 -16.67%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 100040 100038 -0.00%
BenchmarkPostingsForMatchers/Block/i=~""-4 100045 100043 -0.00%
BenchmarkPostingsForMatchers/Block/i!=""-4 100041 100039 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 17 15 -11.76%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 23 21 -8.70%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 100046 100044 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 100050 100048 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 100049 100047 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 11150 11148 -0.02%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 100055 100053 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 111238 111234 -0.00%
benchmark old bytes new bytes delta
BenchmarkPostingsForMatchers/Head/n="1"-4 10887816 10887817 +0.00%
BenchmarkPostingsForMatchers/Head/n="1",j="foo"-4 5456648 5456648 +0.00%
BenchmarkPostingsForMatchers/Head/j="foo",n="1"-4 5456648 5456648 +0.00%
BenchmarkPostingsForMatchers/Head/n="1",j!="foo"-4 5456792 5456712 -0.00%
BenchmarkPostingsForMatchers/Head/i=~".*"-4 258254408 258254328 -0.00%
BenchmarkPostingsForMatchers/Head/i=~".+"-4 273912888 273912904 +0.00%
BenchmarkPostingsForMatchers/Head/i=~""-4 17266680 17266600 -0.00%
BenchmarkPostingsForMatchers/Head/i!=""-4 273912416 273912336 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i=~".*",j="foo"-4 7062578 7062498 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i=~".*",i!="2",j="foo"-4 7062770 7062690 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i!=""-4 28152346 28152266 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i!="",j="foo"-4 22721178 22721098 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i=~".+",j="foo"-4 22721336 22721224 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i=~"1.+",j="foo"-4 3623804 3623733 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!="2",j="foo"-4 22721480 22721400 -0.00%
BenchmarkPostingsForMatchers/Head/n="1",i=~".+",i!~"2.*",j="foo"-4 24816652 24816444 -0.00%
BenchmarkPostingsForMatchers/Block/n="1"-4 296 296 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j="foo"-4 424 424 +0.00%
BenchmarkPostingsForMatchers/Block/j="foo",n="1"-4 424 424 +0.00%
BenchmarkPostingsForMatchers/Block/n="1",j!="foo"-4 1544 1464 -5.18%
BenchmarkPostingsForMatchers/Block/i=~".*"-4 1606114 1606045 -0.00%
BenchmarkPostingsForMatchers/Block/i=~".+"-4 17264709 17264629 -0.00%
BenchmarkPostingsForMatchers/Block/i=~""-4 17264780 17264696 -0.00%
BenchmarkPostingsForMatchers/Block/i!=""-4 17264680 17264600 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",j="foo"-4 1606253 1606165 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i=~".*",i!="2",j="foo"-4 1606445 1606348 -0.01%
BenchmarkPostingsForMatchers/Block/n="1",i!=""-4 17264808 17264728 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i!="",j="foo"-4 17264936 17264856 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",j="foo"-4 17264965 17264885 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~"1.+",j="foo"-4 3148262 3148182 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!="2",j="foo"-4 17265141 17265061 -0.00%
BenchmarkPostingsForMatchers/Block/n="1",i=~".+",i!~"2.*",j="foo"-4 20416944 20416784 -0.00%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2020-01-01 03:38:01 -08:00
|
|
|
for _, val := range vals {
|
2020-01-01 03:21:42 -08:00
|
|
|
if m.Matches(val) {
|
|
|
|
res = append(res, val)
|
2016-12-14 09:38:46 -08:00
|
|
|
}
|
|
|
|
}
|
2017-05-13 08:43:25 -07:00
|
|
|
|
2016-12-14 09:38:46 -08:00
|
|
|
if len(res) == 0 {
|
2017-11-30 06:34:49 -08:00
|
|
|
return index.EmptyPostings(), nil
|
2016-12-14 09:38:46 -08:00
|
|
|
}
|
|
|
|
|
2023-05-10 19:53:35 -07:00
|
|
|
return ix.Postings(ctx, m.Name, res...)
|
2016-12-14 09:38:46 -08:00
|
|
|
}
|
|
|
|
|
2019-09-13 08:10:35 -07:00
|
|
|
// inversePostingsForMatcher returns the postings for the series with the label name set but not matching the matcher.
|
2023-05-10 19:53:35 -07:00
|
|
|
func inversePostingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Matcher) (index.Postings, error) {
|
2023-05-21 01:41:30 -07:00
|
|
|
// Fast-path for MatchNotRegexp matching.
|
|
|
|
// Inverse of a MatchNotRegexp is MatchRegexp (double negation).
|
|
|
|
// Fast-path for set matching.
|
|
|
|
if m.Type == labels.MatchNotRegexp {
|
|
|
|
setMatches := findSetMatches(m.GetRegexString())
|
|
|
|
if len(setMatches) > 0 {
|
2023-05-10 19:53:35 -07:00
|
|
|
return ix.Postings(ctx, m.Name, setMatches...)
|
2023-05-21 01:41:30 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fast-path for MatchNotEqual matching.
|
|
|
|
// Inverse of a MatchNotEqual is MatchEqual (double negation).
|
|
|
|
if m.Type == labels.MatchNotEqual {
|
2023-05-10 19:53:35 -07:00
|
|
|
return ix.Postings(ctx, m.Name, m.Value)
|
2023-05-21 01:41:30 -07:00
|
|
|
}
|
|
|
|
|
2023-05-10 19:53:35 -07:00
|
|
|
vals, err := ix.LabelValues(ctx, m.Name)
|
2017-12-17 10:08:21 -08:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
var res []string
|
2023-01-05 06:05:29 -08:00
|
|
|
// If the inverse match is ="", we just want all the values.
|
|
|
|
if m.Type == labels.MatchEqual && m.Value == "" {
|
|
|
|
res = vals
|
|
|
|
} else {
|
|
|
|
for _, val := range vals {
|
|
|
|
if !m.Matches(val) {
|
|
|
|
res = append(res, val)
|
2020-06-25 06:10:29 -07:00
|
|
|
}
|
2017-12-17 10:08:21 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-05-10 19:53:35 -07:00
|
|
|
return ix.Postings(ctx, m.Name, res...)
|
2019-05-27 04:24:46 -07:00
|
|
|
}
|
|
|
|
|
2023-09-14 07:02:04 -07:00
|
|
|
func labelValuesWithMatchers(ctx context.Context, r IndexReader, name string, matchers ...*labels.Matcher) ([]string, error) {
|
|
|
|
allValues, err := r.LabelValues(ctx, name)
|
2021-02-09 09:38:35 -08:00
|
|
|
if err != nil {
|
2023-11-16 10:54:41 -08:00
|
|
|
return nil, fmt.Errorf("fetching values of label %s: %w", name, err)
|
2021-02-09 09:38:35 -08:00
|
|
|
}
|
2023-07-04 02:37:58 -07:00
|
|
|
|
|
|
|
// If we have a matcher for the label name, we can filter out values that don't match
|
|
|
|
// before we fetch postings. This is especially useful for labels with many values.
|
|
|
|
// e.g. __name__ with a selector like {__name__="xyz"}
|
2024-01-23 02:40:21 -08:00
|
|
|
hasMatchersForOtherLabels := false
|
2023-07-04 02:37:58 -07:00
|
|
|
for _, m := range matchers {
|
|
|
|
if m.Name != name {
|
2024-01-23 02:40:21 -08:00
|
|
|
hasMatchersForOtherLabels = true
|
2023-07-04 02:37:58 -07:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// re-use the allValues slice to avoid allocations
|
|
|
|
// this is safe because the iteration is always ahead of the append
|
|
|
|
filteredValues := allValues[:0]
|
|
|
|
for _, v := range allValues {
|
|
|
|
if m.Matches(v) {
|
|
|
|
filteredValues = append(filteredValues, v)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
allValues = filteredValues
|
|
|
|
}
|
|
|
|
|
2024-01-23 02:40:21 -08:00
|
|
|
if len(allValues) == 0 {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we don't have any matchers for other labels, then we're done.
|
|
|
|
if !hasMatchersForOtherLabels {
|
|
|
|
return allValues, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
p, err := PostingsForMatchers(ctx, r, matchers...)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("fetching postings for matchers: %w", err)
|
|
|
|
}
|
|
|
|
|
Label values with matchers by intersecting postings (#9907)
* LabelValues w/matchers by intersecting postings
Instead of iterating all matched series to find the values, this
checks if each one of the label values is present in the matched series
(postings).
Pending to be benchmarked.
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Benchmark labelValuesWithMatchers
name old time/op new time/op
Querier/Head/labelValuesWithMatchers/i_with_n="1" 157ms ± 0% 48ms ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 1.80s ± 0% 0.46s ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 144ms ± 0% 57ms ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 304ms ± 0% 111ms ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 761ms ± 0% 164ms ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 6.11µs ± 0% 6.62µs ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 117ms ± 0% 62ms ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 1.44s ± 0% 0.24s ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 92.1ms ± 0% 70.3ms ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 196ms ± 0% 115ms ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 1.23s ± 0% 0.21s ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 1.06ms ± 0% 0.88ms ± 0%
name old alloc/op new alloc/op
Querier/Head/labelValuesWithMatchers/i_with_n="1" 29.5MB ± 0% 26.9MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 46.8MB ± 0% 251.5MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 29.5MB ± 0% 22.3MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 46.8MB ± 0% 23.9MB ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 10.3kB ± 0% 138535.2kB ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 5.54kB ± 0% 7.09kB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 39.1MB ± 0% 28.5MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 287MB ± 0% 253MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 34.3MB ± 0% 23.9MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 51.6MB ± 0% 25.5MB ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 144MB ± 0% 139MB ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 6.43kB ± 0% 8.66kB ± 0%
name old allocs/op new allocs/op
Querier/Head/labelValuesWithMatchers/i_with_n="1" 104k ± 0% 500k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 204k ± 0% 600k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 104k ± 0% 500k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 204k ± 0% 500k ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 66.0 ± 0% 255.0 ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 61.0 ± 0% 205.0 ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 304k ± 0% 600k ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 5.20M ± 0% 0.70M ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 204k ± 0% 600k ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 304k ± 0% 600k ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 3.00M ± 0% 0.00M ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 61.0 ± 0% 247.0 ± 0%
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Don't expand postings to intersect them
Using a min heap we can check whether matched postings intersect with
each one of the label values postings. This avoid expanding postings
(and thus having all of them in memory at any point).
Slightly slower than the expanding postings version for some cases, but
definitely pays the price once the cardinality grows.
Still offers 10x latency improvement where previous latencies were
reaching 1s.
Benchmark results:
name \ time/op old.txt intersect.txt intersect_noexpand.txt
Querier/Head/labelValuesWithMatchers/i_with_n="1" 157ms ± 0% 48ms ± 0% 110ms ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 1.80s ± 0% 0.46s ± 0% 0.18s ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 144ms ± 0% 57ms ± 0% 125ms ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 304ms ± 0% 111ms ± 0% 177ms ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 761ms ± 0% 164ms ± 0% 134ms ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 6.11µs ± 0% 6.62µs ± 0% 4.29µs ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 117ms ± 0% 62ms ± 0% 120ms ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 1.44s ± 0% 0.24s ± 0% 0.15s ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 92.1ms ± 0% 70.3ms ± 0% 125.4ms ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 196ms ± 0% 115ms ± 0% 170ms ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 1.23s ± 0% 0.21s ± 0% 0.14s ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 1.06ms ± 0% 0.88ms ± 0% 0.92ms ± 0%
name \ alloc/op old.txt intersect.txt intersect_noexpand.txt
Querier/Head/labelValuesWithMatchers/i_with_n="1" 29.5MB ± 0% 26.9MB ± 0% 19.1MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 46.8MB ± 0% 251.5MB ± 0% 36.3MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 29.5MB ± 0% 22.3MB ± 0% 19.1MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 46.8MB ± 0% 23.9MB ± 0% 20.7MB ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 10.3kB ± 0% 138535.2kB ± 0% 6.4kB ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 5.54kB ± 0% 7.09kB ± 0% 4.30kB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 39.1MB ± 0% 28.5MB ± 0% 20.7MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 287MB ± 0% 253MB ± 0% 38MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 34.3MB ± 0% 23.9MB ± 0% 20.7MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 51.6MB ± 0% 25.5MB ± 0% 22.3MB ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 144MB ± 0% 139MB ± 0% 0MB ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 6.43kB ± 0% 8.66kB ± 0% 5.86kB ± 0%
name \ allocs/op old.txt intersect.txt intersect_noexpand.txt
Querier/Head/labelValuesWithMatchers/i_with_n="1" 104k ± 0% 500k ± 0% 300k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 204k ± 0% 600k ± 0% 400k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 104k ± 0% 500k ± 0% 300k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 204k ± 0% 500k ± 0% 300k ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 66.0 ± 0% 255.0 ± 0% 139.0 ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 61.0 ± 0% 205.0 ± 0% 87.0 ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 304k ± 0% 600k ± 0% 400k ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 5.20M ± 0% 0.70M ± 0% 0.50M ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 204k ± 0% 600k ± 0% 400k ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 304k ± 0% 600k ± 0% 400k ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 3.00M ± 0% 0.00M ± 0% 0.00M ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 61.0 ± 0% 247.0 ± 0% 129.0 ± 0%
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Apply comment suggestions from the code review
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Ganesh Vernekar <15064823+codesome@users.noreply.github.com>
* Change else { if } to else if
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Remove sorting of label values
We were not sorting them before, so no need to sort them now
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Ganesh Vernekar <15064823+codesome@users.noreply.github.com>
2021-12-28 06:59:03 -08:00
|
|
|
valuesPostings := make([]index.Postings, len(allValues))
|
|
|
|
for i, value := range allValues {
|
2023-09-14 07:02:04 -07:00
|
|
|
valuesPostings[i], err = r.Postings(ctx, name, value)
|
2021-02-09 09:38:35 -08:00
|
|
|
if err != nil {
|
2023-11-16 10:54:41 -08:00
|
|
|
return nil, fmt.Errorf("fetching postings for %s=%q: %w", name, value, err)
|
2021-02-09 09:38:35 -08:00
|
|
|
}
|
|
|
|
}
|
Label values with matchers by intersecting postings (#9907)
* LabelValues w/matchers by intersecting postings
Instead of iterating all matched series to find the values, this
checks if each one of the label values is present in the matched series
(postings).
Pending to be benchmarked.
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Benchmark labelValuesWithMatchers
name old time/op new time/op
Querier/Head/labelValuesWithMatchers/i_with_n="1" 157ms ± 0% 48ms ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 1.80s ± 0% 0.46s ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 144ms ± 0% 57ms ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 304ms ± 0% 111ms ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 761ms ± 0% 164ms ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 6.11µs ± 0% 6.62µs ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 117ms ± 0% 62ms ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 1.44s ± 0% 0.24s ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 92.1ms ± 0% 70.3ms ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 196ms ± 0% 115ms ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 1.23s ± 0% 0.21s ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 1.06ms ± 0% 0.88ms ± 0%
name old alloc/op new alloc/op
Querier/Head/labelValuesWithMatchers/i_with_n="1" 29.5MB ± 0% 26.9MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 46.8MB ± 0% 251.5MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 29.5MB ± 0% 22.3MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 46.8MB ± 0% 23.9MB ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 10.3kB ± 0% 138535.2kB ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 5.54kB ± 0% 7.09kB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 39.1MB ± 0% 28.5MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 287MB ± 0% 253MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 34.3MB ± 0% 23.9MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 51.6MB ± 0% 25.5MB ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 144MB ± 0% 139MB ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 6.43kB ± 0% 8.66kB ± 0%
name old allocs/op new allocs/op
Querier/Head/labelValuesWithMatchers/i_with_n="1" 104k ± 0% 500k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 204k ± 0% 600k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 104k ± 0% 500k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 204k ± 0% 500k ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 66.0 ± 0% 255.0 ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 61.0 ± 0% 205.0 ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 304k ± 0% 600k ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 5.20M ± 0% 0.70M ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 204k ± 0% 600k ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 304k ± 0% 600k ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 3.00M ± 0% 0.00M ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 61.0 ± 0% 247.0 ± 0%
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Don't expand postings to intersect them
Using a min heap we can check whether matched postings intersect with
each one of the label values postings. This avoid expanding postings
(and thus having all of them in memory at any point).
Slightly slower than the expanding postings version for some cases, but
definitely pays the price once the cardinality grows.
Still offers 10x latency improvement where previous latencies were
reaching 1s.
Benchmark results:
name \ time/op old.txt intersect.txt intersect_noexpand.txt
Querier/Head/labelValuesWithMatchers/i_with_n="1" 157ms ± 0% 48ms ± 0% 110ms ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 1.80s ± 0% 0.46s ± 0% 0.18s ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 144ms ± 0% 57ms ± 0% 125ms ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 304ms ± 0% 111ms ± 0% 177ms ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 761ms ± 0% 164ms ± 0% 134ms ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 6.11µs ± 0% 6.62µs ± 0% 4.29µs ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 117ms ± 0% 62ms ± 0% 120ms ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 1.44s ± 0% 0.24s ± 0% 0.15s ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 92.1ms ± 0% 70.3ms ± 0% 125.4ms ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 196ms ± 0% 115ms ± 0% 170ms ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 1.23s ± 0% 0.21s ± 0% 0.14s ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 1.06ms ± 0% 0.88ms ± 0% 0.92ms ± 0%
name \ alloc/op old.txt intersect.txt intersect_noexpand.txt
Querier/Head/labelValuesWithMatchers/i_with_n="1" 29.5MB ± 0% 26.9MB ± 0% 19.1MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 46.8MB ± 0% 251.5MB ± 0% 36.3MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 29.5MB ± 0% 22.3MB ± 0% 19.1MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 46.8MB ± 0% 23.9MB ± 0% 20.7MB ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 10.3kB ± 0% 138535.2kB ± 0% 6.4kB ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 5.54kB ± 0% 7.09kB ± 0% 4.30kB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 39.1MB ± 0% 28.5MB ± 0% 20.7MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 287MB ± 0% 253MB ± 0% 38MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 34.3MB ± 0% 23.9MB ± 0% 20.7MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 51.6MB ± 0% 25.5MB ± 0% 22.3MB ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 144MB ± 0% 139MB ± 0% 0MB ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 6.43kB ± 0% 8.66kB ± 0% 5.86kB ± 0%
name \ allocs/op old.txt intersect.txt intersect_noexpand.txt
Querier/Head/labelValuesWithMatchers/i_with_n="1" 104k ± 0% 500k ± 0% 300k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 204k ± 0% 600k ± 0% 400k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 104k ± 0% 500k ± 0% 300k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 204k ± 0% 500k ± 0% 300k ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 66.0 ± 0% 255.0 ± 0% 139.0 ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 61.0 ± 0% 205.0 ± 0% 87.0 ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 304k ± 0% 600k ± 0% 400k ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 5.20M ± 0% 0.70M ± 0% 0.50M ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 204k ± 0% 600k ± 0% 400k ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 304k ± 0% 600k ± 0% 400k ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 3.00M ± 0% 0.00M ± 0% 0.00M ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 61.0 ± 0% 247.0 ± 0% 129.0 ± 0%
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Apply comment suggestions from the code review
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Ganesh Vernekar <15064823+codesome@users.noreply.github.com>
* Change else { if } to else if
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Remove sorting of label values
We were not sorting them before, so no need to sort them now
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Ganesh Vernekar <15064823+codesome@users.noreply.github.com>
2021-12-28 06:59:03 -08:00
|
|
|
indexes, err := index.FindIntersectingPostings(p, valuesPostings)
|
|
|
|
if err != nil {
|
2023-11-16 10:54:41 -08:00
|
|
|
return nil, fmt.Errorf("intersecting postings: %w", err)
|
2021-02-09 09:38:35 -08:00
|
|
|
}
|
|
|
|
|
Label values with matchers by intersecting postings (#9907)
* LabelValues w/matchers by intersecting postings
Instead of iterating all matched series to find the values, this
checks if each one of the label values is present in the matched series
(postings).
Pending to be benchmarked.
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Benchmark labelValuesWithMatchers
name old time/op new time/op
Querier/Head/labelValuesWithMatchers/i_with_n="1" 157ms ± 0% 48ms ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 1.80s ± 0% 0.46s ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 144ms ± 0% 57ms ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 304ms ± 0% 111ms ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 761ms ± 0% 164ms ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 6.11µs ± 0% 6.62µs ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 117ms ± 0% 62ms ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 1.44s ± 0% 0.24s ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 92.1ms ± 0% 70.3ms ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 196ms ± 0% 115ms ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 1.23s ± 0% 0.21s ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 1.06ms ± 0% 0.88ms ± 0%
name old alloc/op new alloc/op
Querier/Head/labelValuesWithMatchers/i_with_n="1" 29.5MB ± 0% 26.9MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 46.8MB ± 0% 251.5MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 29.5MB ± 0% 22.3MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 46.8MB ± 0% 23.9MB ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 10.3kB ± 0% 138535.2kB ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 5.54kB ± 0% 7.09kB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 39.1MB ± 0% 28.5MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 287MB ± 0% 253MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 34.3MB ± 0% 23.9MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 51.6MB ± 0% 25.5MB ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 144MB ± 0% 139MB ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 6.43kB ± 0% 8.66kB ± 0%
name old allocs/op new allocs/op
Querier/Head/labelValuesWithMatchers/i_with_n="1" 104k ± 0% 500k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 204k ± 0% 600k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 104k ± 0% 500k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 204k ± 0% 500k ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 66.0 ± 0% 255.0 ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 61.0 ± 0% 205.0 ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 304k ± 0% 600k ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 5.20M ± 0% 0.70M ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 204k ± 0% 600k ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 304k ± 0% 600k ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 3.00M ± 0% 0.00M ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 61.0 ± 0% 247.0 ± 0%
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Don't expand postings to intersect them
Using a min heap we can check whether matched postings intersect with
each one of the label values postings. This avoid expanding postings
(and thus having all of them in memory at any point).
Slightly slower than the expanding postings version for some cases, but
definitely pays the price once the cardinality grows.
Still offers 10x latency improvement where previous latencies were
reaching 1s.
Benchmark results:
name \ time/op old.txt intersect.txt intersect_noexpand.txt
Querier/Head/labelValuesWithMatchers/i_with_n="1" 157ms ± 0% 48ms ± 0% 110ms ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 1.80s ± 0% 0.46s ± 0% 0.18s ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 144ms ± 0% 57ms ± 0% 125ms ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 304ms ± 0% 111ms ± 0% 177ms ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 761ms ± 0% 164ms ± 0% 134ms ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 6.11µs ± 0% 6.62µs ± 0% 4.29µs ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 117ms ± 0% 62ms ± 0% 120ms ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 1.44s ± 0% 0.24s ± 0% 0.15s ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 92.1ms ± 0% 70.3ms ± 0% 125.4ms ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 196ms ± 0% 115ms ± 0% 170ms ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 1.23s ± 0% 0.21s ± 0% 0.14s ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 1.06ms ± 0% 0.88ms ± 0% 0.92ms ± 0%
name \ alloc/op old.txt intersect.txt intersect_noexpand.txt
Querier/Head/labelValuesWithMatchers/i_with_n="1" 29.5MB ± 0% 26.9MB ± 0% 19.1MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 46.8MB ± 0% 251.5MB ± 0% 36.3MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 29.5MB ± 0% 22.3MB ± 0% 19.1MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 46.8MB ± 0% 23.9MB ± 0% 20.7MB ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 10.3kB ± 0% 138535.2kB ± 0% 6.4kB ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 5.54kB ± 0% 7.09kB ± 0% 4.30kB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 39.1MB ± 0% 28.5MB ± 0% 20.7MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 287MB ± 0% 253MB ± 0% 38MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 34.3MB ± 0% 23.9MB ± 0% 20.7MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 51.6MB ± 0% 25.5MB ± 0% 22.3MB ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 144MB ± 0% 139MB ± 0% 0MB ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 6.43kB ± 0% 8.66kB ± 0% 5.86kB ± 0%
name \ allocs/op old.txt intersect.txt intersect_noexpand.txt
Querier/Head/labelValuesWithMatchers/i_with_n="1" 104k ± 0% 500k ± 0% 300k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 204k ± 0% 600k ± 0% 400k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 104k ± 0% 500k ± 0% 300k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 204k ± 0% 500k ± 0% 300k ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 66.0 ± 0% 255.0 ± 0% 139.0 ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 61.0 ± 0% 205.0 ± 0% 87.0 ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 304k ± 0% 600k ± 0% 400k ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 5.20M ± 0% 0.70M ± 0% 0.50M ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 204k ± 0% 600k ± 0% 400k ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 304k ± 0% 600k ± 0% 400k ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 3.00M ± 0% 0.00M ± 0% 0.00M ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 61.0 ± 0% 247.0 ± 0% 129.0 ± 0%
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Apply comment suggestions from the code review
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Ganesh Vernekar <15064823+codesome@users.noreply.github.com>
* Change else { if } to else if
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Remove sorting of label values
We were not sorting them before, so no need to sort them now
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Ganesh Vernekar <15064823+codesome@users.noreply.github.com>
2021-12-28 06:59:03 -08:00
|
|
|
values := make([]string, 0, len(indexes))
|
|
|
|
for _, idx := range indexes {
|
|
|
|
values = append(values, allValues[idx])
|
2021-02-09 09:38:35 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
return values, nil
|
|
|
|
}
|
|
|
|
|
2023-09-14 01:39:51 -07:00
|
|
|
func labelNamesWithMatchers(ctx context.Context, r IndexReader, matchers ...*labels.Matcher) ([]string, error) {
|
2023-05-10 19:53:35 -07:00
|
|
|
p, err := PostingsForMatchers(ctx, r, matchers...)
|
2021-07-20 05:38:08 -07:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2021-11-06 03:10:04 -07:00
|
|
|
var postings []storage.SeriesRef
|
2021-07-20 05:38:08 -07:00
|
|
|
for p.Next() {
|
|
|
|
postings = append(postings, p.At())
|
|
|
|
}
|
2023-11-16 10:54:41 -08:00
|
|
|
if err := p.Err(); err != nil {
|
|
|
|
return nil, fmt.Errorf("postings for label names with matchers: %w", err)
|
2021-07-20 05:38:08 -07:00
|
|
|
}
|
|
|
|
|
2023-09-14 01:39:51 -07:00
|
|
|
return r.LabelNamesFor(ctx, postings...)
|
2021-07-20 05:38:08 -07:00
|
|
|
}
|
|
|
|
|
2022-12-15 10:29:44 -08:00
|
|
|
// seriesData, used inside other iterators, are updated when we move from one series to another.
type seriesData struct {
	chks      []chunks.Meta        // Chunk metas of the current series.
	intervals tombstones.Intervals // Deletion intervals applying to the current series.
	labels    labels.Labels        // Label set of the current series.
}
|
|
|
|
|
|
|
|
// Labels implements part of storage.Series and storage.ChunkSeries.
|
|
|
|
func (s *seriesData) Labels() labels.Labels { return s.labels }
|
|
|
|
|
2020-08-03 03:32:56 -07:00
|
|
|
// blockBaseSeriesSet allows to iterate over all series in the single block.
// Iterated series are trimmed with given min and max time as well as tombstones.
// See newBlockSeriesSet and NewBlockChunkSeriesSet to use it for either sample or chunk iterating.
type blockBaseSeriesSet struct {
	blockID         ulid.ULID
	p               index.Postings
	index           IndexReader
	chunks          ChunkReader
	tombstones      tombstones.Reader
	mint, maxt      int64
	disableTrimming bool

	curr seriesData // Data of the series the set is currently positioned on.

	bufChks []chunks.Meta         // Scratch buffer reused by Next() when reading chunk metas.
	builder labels.ScratchBuilder // Scratch builder reused by Next() when reading label sets.
	err     error                 // Sticky error; surfaced via Err().
}
|
|
|
|
|
|
|
|
// Next advances the set to the next series that has at least one chunk
// overlapping the requested [mint, maxt] range and is not entirely deleted by
// tombstones. On success it populates b.curr and returns true; it returns
// false when the postings are exhausted or an error occurred (stored in
// b.err).
func (b *blockBaseSeriesSet) Next() bool {
	for b.p.Next() {
		if err := b.index.Series(b.p.At(), &b.builder, &b.bufChks); err != nil {
			// Postings may be stale. Skip if no underlying series exists.
			if errors.Is(err, storage.ErrNotFound) {
				continue
			}
			b.err = fmt.Errorf("get series %d: %w", b.p.At(), err)
			return false
		}

		// A series without chunks cannot contribute any samples; skip it.
		if len(b.bufChks) == 0 {
			continue
		}

		intervals, err := b.tombstones.Get(b.p.At())
		if err != nil {
			b.err = fmt.Errorf("get tombstones: %w", err)
			return false
		}

		// NOTE:
		// * block time range is half-open: [meta.MinTime, meta.MaxTime).
		// * chunks are both closed: [chk.MinTime, chk.MaxTime].
		// * requested time ranges are closed: [req.Start, req.End].

		var trimFront, trimBack bool

		// Copy chunks as iterables are reusable.
		// Count those in range to size allocation (roughly - ignoring tombstones).
		nChks := 0
		for _, chk := range b.bufChks {
			if !(chk.MaxTime < b.mint || chk.MinTime > b.maxt) {
				nChks++
			}
		}
		chks := make([]chunks.Meta, 0, nChks)

		// Prefilter chunks and pick those which are not entirely deleted or totally outside of the requested range.
		for _, chk := range b.bufChks {
			if chk.MaxTime < b.mint {
				continue
			}
			if chk.MinTime > b.maxt {
				continue
			}
			// Drop chunks fully covered by deletion intervals.
			if (tombstones.Interval{Mint: chk.MinTime, Maxt: chk.MaxTime}.IsSubrange(intervals)) {
				continue
			}
			chks = append(chks, chk)

			// If still not entirely deleted, check if trim is needed based on requested time range.
			if !b.disableTrimming {
				if chk.MinTime < b.mint {
					trimFront = true
				}
				if chk.MaxTime > b.maxt {
					trimBack = true
				}
			}
		}

		// All overlapping chunks were deleted; try the next series.
		if len(chks) == 0 {
			continue
		}

		// Express the requested trimming as additional deletion intervals so
		// downstream iterators drop out-of-range samples.
		if trimFront {
			intervals = intervals.Add(tombstones.Interval{Mint: math.MinInt64, Maxt: b.mint - 1})
		}
		if trimBack {
			intervals = intervals.Add(tombstones.Interval{Mint: b.maxt + 1, Maxt: math.MaxInt64})
		}

		b.curr.labels = b.builder.Labels()
		b.curr.chks = chks
		b.curr.intervals = intervals
		return true
	}
	return false
}
|
|
|
|
|
|
|
|
func (b *blockBaseSeriesSet) Err() error {
|
|
|
|
if b.err != nil {
|
|
|
|
return b.err
|
|
|
|
}
|
|
|
|
return b.p.Err()
|
|
|
|
}
|
|
|
|
|
2023-09-14 09:57:31 -07:00
|
|
|
// Warnings implements storage.SeriesSet; this set never emits warnings.
func (b *blockBaseSeriesSet) Warnings() annotations.Annotations {
	return nil
}
|
2020-08-03 03:32:56 -07:00
|
|
|
|
2022-07-06 05:34:02 -07:00
|
|
|
// populateWithDelGenericSeriesIterator allows to iterate over given chunk
// metas. In each iteration it ensures that chunks are trimmed based on given
// tombstones interval if any.
//
// populateWithDelGenericSeriesIterator assumes that chunks that would be fully
// removed by intervals are filtered out in previous phase.
//
// On each iteration currMeta is available. If currDelIter is not nil, it
// means that the chunk in currMeta is invalid and a chunk rewrite is needed,
// for which currDelIter should be used.
type populateWithDelGenericSeriesIterator struct {
	blockID ulid.ULID
	cr      ChunkReader
	// metas are expected to be sorted by minTime and should be related to
	// the same, single series.
	// It's possible for a single chunks.Meta to refer to multiple chunks.
	// cr.ChunkOrIterator() would return an iterable and a nil chunk in this
	// case.
	metas []chunks.Meta

	i         int   // Index into metas; -1 if not started yet.
	err       error // Sticky error; once set, next() returns false.
	bufIter   DeletedIterator // Retained for memory re-use. currDelIter may point here.
	intervals tombstones.Intervals

	currDelIter chunkenc.Iterator
	// currMeta is the current chunks.Meta from metas. currMeta.Chunk is set to
	// the chunk returned from cr.ChunkOrIterable(). As that can return a nil
	// chunk, currMeta.Chunk is not always guaranteed to be set.
	currMeta chunks.Meta
}
|
|
|
|
|
2022-09-20 11:27:44 -07:00
|
|
|
func (p *populateWithDelGenericSeriesIterator) reset(blockID ulid.ULID, cr ChunkReader, chks []chunks.Meta, intervals tombstones.Intervals) {
|
|
|
|
p.blockID = blockID
|
2023-11-28 02:14:29 -08:00
|
|
|
p.cr = cr
|
|
|
|
p.metas = chks
|
2022-09-20 11:27:44 -07:00
|
|
|
p.i = -1
|
|
|
|
p.err = nil
|
2023-08-26 07:01:15 -07:00
|
|
|
// Note we don't touch p.bufIter.Iter; it is holding on to an iterator we might reuse in next().
|
2022-09-20 11:27:44 -07:00
|
|
|
p.bufIter.Intervals = p.bufIter.Intervals[:0]
|
|
|
|
p.intervals = intervals
|
|
|
|
p.currDelIter = nil
|
2023-11-28 02:14:29 -08:00
|
|
|
p.currMeta = chunks.Meta{}
|
2020-08-03 03:32:56 -07:00
|
|
|
}
|
|
|
|
|
2023-02-19 09:34:51 -08:00
|
|
|
// If copyHeadChunk is true, then the head chunk (i.e. the in-memory chunk of the TSDB)
|
|
|
|
// is deep copied to avoid races between reads and copying chunk bytes.
|
|
|
|
// However, if the deletion intervals overlaps with the head chunk, then the head chunk is
|
|
|
|
// not copied irrespective of copyHeadChunk because it will be re-encoded later anyway.
|
|
|
|
func (p *populateWithDelGenericSeriesIterator) next(copyHeadChunk bool) bool {
|
2023-11-28 02:14:29 -08:00
|
|
|
if p.err != nil || p.i >= len(p.metas)-1 {
|
2020-08-03 03:32:56 -07:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
p.i++
|
2023-11-28 02:14:29 -08:00
|
|
|
p.currMeta = p.metas[p.i]
|
2020-08-03 03:32:56 -07:00
|
|
|
|
2020-11-09 08:51:25 -08:00
|
|
|
p.bufIter.Intervals = p.bufIter.Intervals[:0]
|
2020-08-03 03:32:56 -07:00
|
|
|
for _, interval := range p.intervals {
|
2023-11-28 02:14:29 -08:00
|
|
|
if p.currMeta.OverlapsClosedInterval(interval.Mint, interval.Maxt) {
|
2020-11-09 08:51:25 -08:00
|
|
|
p.bufIter.Intervals = p.bufIter.Intervals.Add(interval)
|
2020-08-03 03:32:56 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-11-28 02:14:29 -08:00
|
|
|
hcr, ok := p.cr.(*headChunkReader)
|
|
|
|
var iterable chunkenc.Iterable
|
2023-02-19 09:34:51 -08:00
|
|
|
if ok && copyHeadChunk && len(p.bufIter.Intervals) == 0 {
|
|
|
|
// ChunkWithCopy will copy the head chunk.
|
|
|
|
var maxt int64
|
2023-11-28 02:14:29 -08:00
|
|
|
p.currMeta.Chunk, maxt, p.err = hcr.ChunkWithCopy(p.currMeta)
|
2023-02-19 09:34:51 -08:00
|
|
|
// For the in-memory head chunk the index reader sets maxt as MaxInt64. We fix it here.
|
2023-11-28 02:14:29 -08:00
|
|
|
p.currMeta.MaxTime = maxt
|
2023-02-19 09:34:51 -08:00
|
|
|
} else {
|
2023-11-28 02:14:29 -08:00
|
|
|
p.currMeta.Chunk, iterable, p.err = p.cr.ChunkOrIterable(p.currMeta)
|
2023-02-19 09:34:51 -08:00
|
|
|
}
|
2023-11-28 02:14:29 -08:00
|
|
|
|
2023-02-19 09:34:51 -08:00
|
|
|
if p.err != nil {
|
2023-11-16 10:54:41 -08:00
|
|
|
p.err = fmt.Errorf("cannot populate chunk %d from block %s: %w", p.currMeta.Ref, p.blockID.String(), p.err)
|
2023-02-19 09:34:51 -08:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2023-11-28 02:14:29 -08:00
|
|
|
// Use the single chunk if possible.
|
|
|
|
if p.currMeta.Chunk != nil {
|
|
|
|
if len(p.bufIter.Intervals) == 0 {
|
|
|
|
// If there is no overlap with deletion intervals and a single chunk is
|
|
|
|
// returned, we can take chunk as it is.
|
|
|
|
p.currDelIter = nil
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
// Otherwise we need to iterate over the samples in the single chunk
|
|
|
|
// and create new chunks.
|
|
|
|
p.bufIter.Iter = p.currMeta.Chunk.Iterator(p.bufIter.Iter)
|
|
|
|
p.currDelIter = &p.bufIter
|
2020-08-03 03:32:56 -07:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2023-11-28 02:14:29 -08:00
|
|
|
// Otherwise, use the iterable to create an iterator.
|
|
|
|
p.bufIter.Iter = iterable.Iterator(p.bufIter.Iter)
|
2022-09-20 11:27:44 -07:00
|
|
|
p.currDelIter = &p.bufIter
|
2020-08-03 03:32:56 -07:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// Err returns the first error encountered while populating chunks, if any.
func (p *populateWithDelGenericSeriesIterator) Err() error {
	return p.err
}
|
|
|
|
|
2022-09-20 11:27:44 -07:00
|
|
|
// blockSeriesEntry is a storage.Series backed by a block's ChunkReader and
// the per-series data (labels, chunk metas, and deletion intervals).
type blockSeriesEntry struct {
	chunks  ChunkReader
	blockID ulid.ULID
	seriesData
}
|
2021-10-22 01:06:44 -07:00
|
|
|
|
2022-09-20 11:27:44 -07:00
|
|
|
func (s *blockSeriesEntry) Iterator(it chunkenc.Iterator) chunkenc.Iterator {
|
|
|
|
pi, ok := it.(*populateWithDelSeriesIterator)
|
|
|
|
if !ok {
|
|
|
|
pi = &populateWithDelSeriesIterator{}
|
|
|
|
}
|
|
|
|
pi.reset(s.blockID, s.chunks, s.chks, s.intervals)
|
|
|
|
return pi
|
|
|
|
}
|
|
|
|
|
|
|
|
// chunkSeriesEntry is a storage.ChunkSeries backed by a block's ChunkReader
// and the per-series data (labels, chunk metas, and deletion intervals).
type chunkSeriesEntry struct {
	chunks  ChunkReader
	blockID ulid.ULID
	seriesData
}
|
|
|
|
|
|
|
|
func (s *chunkSeriesEntry) Iterator(it chunks.Iterator) chunks.Iterator {
|
|
|
|
pi, ok := it.(*populateWithDelChunkSeriesIterator)
|
|
|
|
if !ok {
|
|
|
|
pi = &populateWithDelChunkSeriesIterator{}
|
|
|
|
}
|
|
|
|
pi.reset(s.blockID, s.chunks, s.chks, s.intervals)
|
|
|
|
return pi
|
2020-08-03 03:32:56 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// populateWithDelSeriesIterator allows iterating over samples for a single
// series, applying tombstone deletions on the fly.
type populateWithDelSeriesIterator struct {
	populateWithDelGenericSeriesIterator

	// curr iterates the samples of the current chunk. Nil until the first
	// chunk has been opened.
	curr chunkenc.Iterator
}
|
|
|
|
|
2022-09-20 11:27:44 -07:00
|
|
|
func (p *populateWithDelSeriesIterator) reset(blockID ulid.ULID, cr ChunkReader, chks []chunks.Meta, intervals tombstones.Intervals) {
|
|
|
|
p.populateWithDelGenericSeriesIterator.reset(blockID, cr, chks, intervals)
|
|
|
|
p.curr = nil
|
|
|
|
}
|
|
|
|
|
2021-11-28 23:54:23 -08:00
|
|
|
func (p *populateWithDelSeriesIterator) Next() chunkenc.ValueType {
|
|
|
|
if p.curr != nil {
|
|
|
|
if valueType := p.curr.Next(); valueType != chunkenc.ValNone {
|
|
|
|
return valueType
|
|
|
|
}
|
2020-08-03 03:32:56 -07:00
|
|
|
}
|
|
|
|
|
2023-02-19 09:34:51 -08:00
|
|
|
for p.next(false) {
|
2020-08-03 03:32:56 -07:00
|
|
|
if p.currDelIter != nil {
|
|
|
|
p.curr = p.currDelIter
|
|
|
|
} else {
|
2023-11-28 02:14:29 -08:00
|
|
|
p.curr = p.currMeta.Chunk.Iterator(p.curr)
|
2020-08-03 03:32:56 -07:00
|
|
|
}
|
2021-11-28 23:54:23 -08:00
|
|
|
if valueType := p.curr.Next(); valueType != chunkenc.ValNone {
|
|
|
|
return valueType
|
2020-08-03 03:32:56 -07:00
|
|
|
}
|
|
|
|
}
|
2021-11-28 23:54:23 -08:00
|
|
|
return chunkenc.ValNone
|
2020-08-03 03:32:56 -07:00
|
|
|
}
|
|
|
|
|
2021-11-28 23:54:23 -08:00
|
|
|
func (p *populateWithDelSeriesIterator) Seek(t int64) chunkenc.ValueType {
|
|
|
|
if p.curr != nil {
|
|
|
|
if valueType := p.curr.Seek(t); valueType != chunkenc.ValNone {
|
|
|
|
return valueType
|
|
|
|
}
|
2020-08-03 03:32:56 -07:00
|
|
|
}
|
2021-11-28 23:54:23 -08:00
|
|
|
for p.Next() != chunkenc.ValNone {
|
|
|
|
if valueType := p.curr.Seek(t); valueType != chunkenc.ValNone {
|
|
|
|
return valueType
|
2020-08-03 03:32:56 -07:00
|
|
|
}
|
|
|
|
}
|
2021-11-28 23:54:23 -08:00
|
|
|
return chunkenc.ValNone
|
2020-08-03 03:32:56 -07:00
|
|
|
}
|
|
|
|
|
2021-11-28 23:54:23 -08:00
|
|
|
func (p *populateWithDelSeriesIterator) At() (int64, float64) {
|
|
|
|
return p.curr.At()
|
|
|
|
}
|
2021-11-12 10:07:41 -08:00
|
|
|
|
2024-01-23 08:02:14 -08:00
|
|
|
func (p *populateWithDelSeriesIterator) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) {
|
|
|
|
return p.curr.AtHistogram(h)
|
2021-06-30 07:48:13 -07:00
|
|
|
}
|
2021-11-12 10:07:41 -08:00
|
|
|
|
2024-01-23 08:02:14 -08:00
|
|
|
func (p *populateWithDelSeriesIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
|
|
|
|
return p.curr.AtFloatHistogram(fh)
|
2021-11-28 23:54:23 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
func (p *populateWithDelSeriesIterator) AtT() int64 {
|
|
|
|
return p.curr.AtT()
|
2021-06-29 07:38:46 -07:00
|
|
|
}
|
2020-08-03 03:32:56 -07:00
|
|
|
|
|
|
|
func (p *populateWithDelSeriesIterator) Err() error {
|
|
|
|
if err := p.populateWithDelGenericSeriesIterator.Err(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if p.curr != nil {
|
|
|
|
return p.curr.Err()
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// populateWithDelChunkSeriesIterator allows iterating over a series' chunks
// with tombstone deletions applied, re-encoding chunks where needed.
type populateWithDelChunkSeriesIterator struct {
	populateWithDelGenericSeriesIterator

	// currMetaWithChunk is current meta with its chunk field set. This meta
	// is guaranteed to map to a single chunk. This differs from
	// populateWithDelGenericSeriesIterator.currMeta as that
	// could refer to multiple chunks.
	currMetaWithChunk chunks.Meta

	// chunksFromIterable stores the chunks created from iterating through
	// the iterable returned by cr.ChunkOrIterable() (with deleted samples
	// removed).
	chunksFromIterable []chunks.Meta
	// chunksFromIterableIdx indexes into chunksFromIterable; -1 before the
	// first chunk has been served.
	chunksFromIterableIdx int
}
|
|
|
|
|
2022-09-20 11:27:44 -07:00
|
|
|
func (p *populateWithDelChunkSeriesIterator) reset(blockID ulid.ULID, cr ChunkReader, chks []chunks.Meta, intervals tombstones.Intervals) {
|
|
|
|
p.populateWithDelGenericSeriesIterator.reset(blockID, cr, chks, intervals)
|
2023-11-28 02:14:29 -08:00
|
|
|
p.currMetaWithChunk = chunks.Meta{}
|
|
|
|
p.chunksFromIterable = p.chunksFromIterable[:0]
|
|
|
|
p.chunksFromIterableIdx = -1
|
2022-09-20 11:27:44 -07:00
|
|
|
}
|
|
|
|
|
2020-08-03 03:32:56 -07:00
|
|
|
func (p *populateWithDelChunkSeriesIterator) Next() bool {
|
2023-11-28 02:14:29 -08:00
|
|
|
if p.currMeta.Chunk == nil {
|
|
|
|
// If we've been creating chunks from the iterable, check if there are
|
|
|
|
// any more chunks to iterate through.
|
|
|
|
if p.chunksFromIterableIdx < len(p.chunksFromIterable)-1 {
|
|
|
|
p.chunksFromIterableIdx++
|
|
|
|
p.currMetaWithChunk = p.chunksFromIterable[p.chunksFromIterableIdx]
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Move to the next chunk/deletion iterator.
|
2023-11-29 02:24:04 -08:00
|
|
|
// This is a for loop as if the current p.currDelIter returns no samples
|
|
|
|
// (which means a chunk won't be created), there still might be more
|
|
|
|
// samples/chunks from the rest of p.metas.
|
|
|
|
for p.next(true) {
|
2023-11-28 02:14:29 -08:00
|
|
|
if p.currDelIter == nil {
|
|
|
|
p.currMetaWithChunk = p.currMeta
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2023-11-29 02:24:04 -08:00
|
|
|
if p.currMeta.Chunk != nil {
|
|
|
|
// If ChunkOrIterable() returned a non-nil chunk, the samples in
|
|
|
|
// p.currDelIter will only form one chunk, as the only change
|
|
|
|
// p.currDelIter might make is deleting some samples.
|
|
|
|
if p.populateCurrForSingleChunk() {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// If ChunkOrIterable() returned an iterable, multiple chunks may be
|
|
|
|
// created from the samples in p.currDelIter.
|
|
|
|
if p.populateChunksFromIterable() {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
return false
|
2023-11-28 02:14:29 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
// populateCurrForSingleChunk sets the fields within p.currMetaWithChunk. This
|
|
|
|
// should be called if the samples in p.currDelIter only form one chunk.
|
|
|
|
func (p *populateWithDelChunkSeriesIterator) populateCurrForSingleChunk() bool {
|
2021-11-28 23:54:23 -08:00
|
|
|
valueType := p.currDelIter.Next()
|
|
|
|
if valueType == chunkenc.ValNone {
|
2020-08-03 03:32:56 -07:00
|
|
|
if err := p.currDelIter.Err(); err != nil {
|
2023-11-16 10:54:41 -08:00
|
|
|
p.err = fmt.Errorf("iterate chunk while re-encoding: %w", err)
|
2020-08-03 03:32:56 -07:00
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
2023-11-28 02:14:29 -08:00
|
|
|
p.currMetaWithChunk.MinTime = p.currDelIter.AtT()
|
2020-08-03 03:32:56 -07:00
|
|
|
|
2023-11-28 02:14:29 -08:00
|
|
|
// Re-encode the chunk if iterator is provided. This means that it has
|
2022-07-06 05:34:02 -07:00
|
|
|
// some samples to be deleted or chunk is opened.
|
2021-07-04 03:42:37 -07:00
|
|
|
var (
|
2021-11-28 23:54:23 -08:00
|
|
|
newChunk chunkenc.Chunk
|
|
|
|
app chunkenc.Appender
|
|
|
|
t int64
|
|
|
|
err error
|
2021-07-04 03:42:37 -07:00
|
|
|
)
|
2021-11-28 23:54:23 -08:00
|
|
|
switch valueType {
|
|
|
|
case chunkenc.ValHistogram:
|
|
|
|
newChunk = chunkenc.NewHistogramChunk()
|
|
|
|
if app, err = newChunk.Appender(); err != nil {
|
|
|
|
break
|
|
|
|
}
|
2023-11-02 06:23:05 -07:00
|
|
|
for vt := valueType; vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
|
2022-07-06 05:34:02 -07:00
|
|
|
if vt != chunkenc.ValHistogram {
|
|
|
|
err = fmt.Errorf("found value type %v in histogram chunk", vt)
|
|
|
|
break
|
|
|
|
}
|
2023-11-02 06:23:05 -07:00
|
|
|
var h *histogram.Histogram
|
2024-01-23 08:02:14 -08:00
|
|
|
t, h = p.currDelIter.AtHistogram(nil)
|
2023-07-26 06:08:16 -07:00
|
|
|
_, _, app, err = app.AppendHistogram(nil, t, h, true)
|
|
|
|
if err != nil {
|
|
|
|
break
|
2022-07-06 05:34:02 -07:00
|
|
|
}
|
2021-07-04 03:42:37 -07:00
|
|
|
}
|
2021-11-28 23:54:23 -08:00
|
|
|
case chunkenc.ValFloat:
|
|
|
|
newChunk = chunkenc.NewXORChunk()
|
|
|
|
if app, err = newChunk.Appender(); err != nil {
|
|
|
|
break
|
|
|
|
}
|
2023-11-02 06:23:05 -07:00
|
|
|
for vt := valueType; vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
|
2022-07-06 05:34:02 -07:00
|
|
|
if vt != chunkenc.ValFloat {
|
|
|
|
err = fmt.Errorf("found value type %v in float chunk", vt)
|
|
|
|
break
|
|
|
|
}
|
2023-11-02 06:23:05 -07:00
|
|
|
var v float64
|
2021-07-04 03:42:37 -07:00
|
|
|
t, v = p.currDelIter.At()
|
|
|
|
app.Append(t, v)
|
|
|
|
}
|
2022-12-28 00:55:07 -08:00
|
|
|
case chunkenc.ValFloatHistogram:
|
|
|
|
newChunk = chunkenc.NewFloatHistogramChunk()
|
|
|
|
if app, err = newChunk.Appender(); err != nil {
|
|
|
|
break
|
|
|
|
}
|
2023-11-02 06:23:05 -07:00
|
|
|
for vt := valueType; vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
|
2022-12-28 00:55:07 -08:00
|
|
|
if vt != chunkenc.ValFloatHistogram {
|
|
|
|
err = fmt.Errorf("found value type %v in histogram chunk", vt)
|
|
|
|
break
|
|
|
|
}
|
2023-11-02 06:23:05 -07:00
|
|
|
var h *histogram.FloatHistogram
|
2024-01-23 08:02:14 -08:00
|
|
|
t, h = p.currDelIter.AtFloatHistogram(nil)
|
2023-07-26 06:08:16 -07:00
|
|
|
_, _, app, err = app.AppendFloatHistogram(nil, t, h, true)
|
|
|
|
if err != nil {
|
|
|
|
break
|
2022-12-28 00:55:07 -08:00
|
|
|
}
|
|
|
|
}
|
2021-11-28 23:54:23 -08:00
|
|
|
default:
|
2023-11-28 02:14:29 -08:00
|
|
|
err = fmt.Errorf("populateCurrForSingleChunk: value type %v unsupported", valueType)
|
2020-08-03 03:32:56 -07:00
|
|
|
}
|
2021-07-04 03:42:37 -07:00
|
|
|
|
2021-11-28 23:54:23 -08:00
|
|
|
if err != nil {
|
2023-11-16 10:54:41 -08:00
|
|
|
p.err = fmt.Errorf("iterate chunk while re-encoding: %w", err)
|
2021-11-28 23:54:23 -08:00
|
|
|
return false
|
|
|
|
}
|
2020-08-03 03:32:56 -07:00
|
|
|
if err := p.currDelIter.Err(); err != nil {
|
2023-11-16 10:54:41 -08:00
|
|
|
p.err = fmt.Errorf("iterate chunk while re-encoding: %w", err)
|
2020-08-03 03:32:56 -07:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2023-11-28 02:14:29 -08:00
|
|
|
p.currMetaWithChunk.Chunk = newChunk
|
|
|
|
p.currMetaWithChunk.MaxTime = t
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// populateChunksFromIterable reads the samples from currDelIter to create
// chunks for chunksFromIterable. It also sets p.currMetaWithChunk to the first
// chunk. Returns false (setting p.err on failure) when no chunk was produced.
func (p *populateWithDelChunkSeriesIterator) populateChunksFromIterable() bool {
	p.chunksFromIterable = p.chunksFromIterable[:0]
	p.chunksFromIterableIdx = -1

	firstValueType := p.currDelIter.Next()
	if firstValueType == chunkenc.ValNone {
		if err := p.currDelIter.Err(); err != nil {
			p.err = fmt.Errorf("populateChunksFromIterable: no samples could be read: %w", err)
			return false
		}
		// No samples at all after deletions: nothing to emit.
		return false
	}

	var (
		// t is the timestamp for the current sample.
		t int64
		// cmint/cmaxt track the min/max timestamps of the chunk being built.
		cmint int64
		cmaxt int64

		// currentChunk is the chunk currently being appended to.
		currentChunk chunkenc.Chunk

		app chunkenc.Appender

		// newChunk is non-nil when an append forced a new chunk (e.g. a
		// counter reset); recoded reports whether the old chunk was merely
		// re-encoded in place (and so must not be emitted separately).
		newChunk chunkenc.Chunk
		recoded  bool

		err error
	)

	prevValueType := chunkenc.ValNone

	for currentValueType := firstValueType; currentValueType != chunkenc.ValNone; currentValueType = p.currDelIter.Next() {
		// Check if the encoding has changed (i.e. we need to create a new
		// chunk as chunks can't have multiple encoding types).
		// For the first sample, the following condition will always be true as
		// ValNone != ValFloat | ValHistogram | ValFloatHistogram.
		if currentValueType != prevValueType {
			if prevValueType != chunkenc.ValNone {
				// Finish the chunk of the previous encoding before starting a new one.
				p.chunksFromIterable = append(p.chunksFromIterable, chunks.Meta{Chunk: currentChunk, MinTime: cmint, MaxTime: cmaxt})
			}
			cmint = p.currDelIter.AtT()
			if currentChunk, err = currentValueType.NewChunk(); err != nil {
				break
			}
			if app, err = currentChunk.Appender(); err != nil {
				break
			}
		}

		switch currentValueType {
		case chunkenc.ValFloat:
			{
				var v float64
				t, v = p.currDelIter.At()
				app.Append(t, v)
			}
		case chunkenc.ValHistogram:
			{
				var v *histogram.Histogram
				t, v = p.currDelIter.AtHistogram(nil)
				// No need to set prevApp as AppendHistogram will set the
				// counter reset header for the appender that's returned.
				newChunk, recoded, app, err = app.AppendHistogram(nil, t, v, false)
			}
		case chunkenc.ValFloatHistogram:
			{
				var v *histogram.FloatHistogram
				t, v = p.currDelIter.AtFloatHistogram(nil)
				// No need to set prevApp as AppendHistogram will set the
				// counter reset header for the appender that's returned.
				newChunk, recoded, app, err = app.AppendFloatHistogram(nil, t, v, false)
			}
		}

		if err != nil {
			break
		}

		if newChunk != nil {
			// The append started a fresh chunk: emit the finished one (unless
			// it was recoded in place) and continue appending to the new one.
			if !recoded {
				p.chunksFromIterable = append(p.chunksFromIterable, chunks.Meta{Chunk: currentChunk, MinTime: cmint, MaxTime: cmaxt})
			}
			currentChunk = newChunk
			cmint = t
		}

		cmaxt = t
		prevValueType = currentValueType
	}

	if err != nil {
		p.err = fmt.Errorf("populateChunksFromIterable: error when writing new chunks: %w", err)
		return false
	}
	if err = p.currDelIter.Err(); err != nil {
		p.err = fmt.Errorf("populateChunksFromIterable: currDelIter error when writing new chunks: %w", err)
		return false
	}

	// Emit the final, still-open chunk.
	if prevValueType != chunkenc.ValNone {
		p.chunksFromIterable = append(p.chunksFromIterable, chunks.Meta{Chunk: currentChunk, MinTime: cmint, MaxTime: cmaxt})
	}

	if len(p.chunksFromIterable) == 0 {
		return false
	}

	p.currMetaWithChunk = p.chunksFromIterable[0]
	p.chunksFromIterableIdx = 0
	return true
}
|
|
|
|
|
2023-11-28 02:14:29 -08:00
|
|
|
// At returns the current chunk meta; valid only after a successful Next().
func (p *populateWithDelChunkSeriesIterator) At() chunks.Meta {
	return p.currMetaWithChunk
}
|
2020-08-03 03:32:56 -07:00
|
|
|
|
|
|
|
// blockSeriesSet allows iterating over sorted, populated series with applied tombstones.
// Series with all deleted chunks are still present as Series with no samples.
// Samples from chunks are also trimmed to requested min and max time.
type blockSeriesSet struct {
	blockBaseSeriesSet
}
|
|
|
|
|
2021-11-03 03:08:34 -07:00
|
|
|
func newBlockSeriesSet(i IndexReader, c ChunkReader, t tombstones.Reader, p index.Postings, mint, maxt int64, disableTrimming bool) storage.SeriesSet {
|
2020-08-03 03:32:56 -07:00
|
|
|
return &blockSeriesSet{
|
|
|
|
blockBaseSeriesSet{
|
2021-11-03 03:08:34 -07:00
|
|
|
index: i,
|
|
|
|
chunks: c,
|
|
|
|
tombstones: t,
|
|
|
|
p: p,
|
|
|
|
mint: mint,
|
|
|
|
maxt: maxt,
|
|
|
|
disableTrimming: disableTrimming,
|
2020-08-03 03:32:56 -07:00
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (b *blockSeriesSet) At() storage.Series {
|
2022-09-20 11:27:44 -07:00
|
|
|
// At can be looped over before iterating, so save the current values locally.
|
|
|
|
return &blockSeriesEntry{
|
|
|
|
chunks: b.chunks,
|
|
|
|
blockID: b.blockID,
|
|
|
|
seriesData: b.curr,
|
2020-08-03 03:32:56 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// blockChunkSeriesSet allows iterating over sorted, populated series with applied tombstones.
// Series with all deleted chunks are still present as labelled iterators with no chunks.
// Chunks are also trimmed to requested [min and max] (keeping samples with min and max timestamps).
type blockChunkSeriesSet struct {
	blockBaseSeriesSet
}
|
|
|
|
|
2023-04-03 23:31:49 -07:00
|
|
|
func NewBlockChunkSeriesSet(id ulid.ULID, i IndexReader, c ChunkReader, t tombstones.Reader, p index.Postings, mint, maxt int64, disableTrimming bool) storage.ChunkSeriesSet {
|
2020-08-03 03:32:56 -07:00
|
|
|
return &blockChunkSeriesSet{
|
|
|
|
blockBaseSeriesSet{
|
2022-11-28 00:12:54 -08:00
|
|
|
blockID: id,
|
2021-11-03 03:08:34 -07:00
|
|
|
index: i,
|
|
|
|
chunks: c,
|
|
|
|
tombstones: t,
|
|
|
|
p: p,
|
|
|
|
mint: mint,
|
|
|
|
maxt: maxt,
|
|
|
|
disableTrimming: disableTrimming,
|
2020-08-03 03:32:56 -07:00
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (b *blockChunkSeriesSet) At() storage.ChunkSeries {
|
2022-09-20 11:27:44 -07:00
|
|
|
// At can be looped over before iterating, so save the current values locally.
|
|
|
|
return &chunkSeriesEntry{
|
|
|
|
chunks: b.chunks,
|
|
|
|
blockID: b.blockID,
|
|
|
|
seriesData: b.curr,
|
2020-08-03 03:32:56 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-09 08:51:25 -08:00
|
|
|
// NewMergedStringIter returns string iterator that allows to merge symbols on demand and stream result.
|
2021-10-22 01:06:44 -07:00
|
|
|
func NewMergedStringIter(a, b index.StringIter) index.StringIter {
|
2020-08-03 03:32:56 -07:00
|
|
|
return &mergedStringIter{a: a, b: b, aok: a.Next(), bok: b.Next()}
|
|
|
|
}
|
|
|
|
|
|
|
|
// mergedStringIter merges two sorted string iterators, emitting values that
// appear in both only once.
type mergedStringIter struct {
	a        index.StringIter
	b        index.StringIter
	aok, bok bool // Whether a/b currently hold a valid value.
	cur      string
	err      error // First error observed from either underlying iterator.
}
|
|
|
|
|
|
|
|
func (m *mergedStringIter) Next() bool {
|
|
|
|
if (!m.aok && !m.bok) || (m.Err() != nil) {
|
|
|
|
return false
|
|
|
|
}
|
2023-04-09 00:08:40 -07:00
|
|
|
switch {
|
|
|
|
case !m.aok:
|
2020-08-03 03:32:56 -07:00
|
|
|
m.cur = m.b.At()
|
|
|
|
m.bok = m.b.Next()
|
2023-03-27 04:40:45 -07:00
|
|
|
m.err = m.b.Err()
|
2023-04-09 00:08:40 -07:00
|
|
|
case !m.bok:
|
2020-08-03 03:32:56 -07:00
|
|
|
m.cur = m.a.At()
|
|
|
|
m.aok = m.a.Next()
|
2023-03-27 04:40:45 -07:00
|
|
|
m.err = m.a.Err()
|
2023-04-09 00:08:40 -07:00
|
|
|
case m.b.At() > m.a.At():
|
2020-08-03 03:32:56 -07:00
|
|
|
m.cur = m.a.At()
|
|
|
|
m.aok = m.a.Next()
|
2023-03-27 04:40:45 -07:00
|
|
|
m.err = m.a.Err()
|
2023-04-09 00:08:40 -07:00
|
|
|
case m.a.At() > m.b.At():
|
2020-08-03 03:32:56 -07:00
|
|
|
m.cur = m.b.At()
|
|
|
|
m.bok = m.b.Next()
|
2023-03-27 04:40:45 -07:00
|
|
|
m.err = m.b.Err()
|
2023-04-09 00:08:40 -07:00
|
|
|
default: // Equal.
|
2020-08-03 03:32:56 -07:00
|
|
|
m.cur = m.b.At()
|
|
|
|
m.aok = m.a.Next()
|
2023-03-27 04:40:45 -07:00
|
|
|
m.err = m.a.Err()
|
2020-08-03 03:32:56 -07:00
|
|
|
m.bok = m.b.Next()
|
2023-03-27 04:40:45 -07:00
|
|
|
if m.err == nil {
|
|
|
|
m.err = m.b.Err()
|
|
|
|
}
|
2020-08-03 03:32:56 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
// At returns the current merged value.
func (m mergedStringIter) At() string {
	return m.cur
}
|
|
|
|
func (m mergedStringIter) Err() error {
|
2023-03-27 04:40:45 -07:00
|
|
|
return m.err
|
2020-08-03 03:32:56 -07:00
|
|
|
}
|
|
|
|
|
2020-11-09 08:51:25 -08:00
|
|
|
// DeletedIterator wraps a chunk Iterator and makes sure any deleted samples are not returned.
type DeletedIterator struct {
	// Iter is an Iterator to be wrapped.
	Iter chunkenc.Iterator
	// Intervals are the deletion intervals. Consumed front-first as
	// iteration advances, so they are presumably sorted by time —
	// NOTE(review): confirm ordering requirement with callers.
	Intervals tombstones.Intervals
}
|
|
|
|
|
2020-11-09 08:51:25 -08:00
|
|
|
func (it *DeletedIterator) At() (int64, float64) {
|
|
|
|
return it.Iter.At()
|
2017-11-30 06:34:49 -08:00
|
|
|
}
|
|
|
|
|
2024-01-23 08:02:14 -08:00
|
|
|
func (it *DeletedIterator) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) {
|
|
|
|
t, h := it.Iter.AtHistogram(h)
|
2021-06-30 07:48:13 -07:00
|
|
|
return t, h
|
|
|
|
}
|
|
|
|
|
2024-01-23 08:02:14 -08:00
|
|
|
func (it *DeletedIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
|
|
|
|
t, h := it.Iter.AtFloatHistogram(fh)
|
2021-11-28 23:54:23 -08:00
|
|
|
return t, h
|
|
|
|
}
|
|
|
|
|
|
|
|
func (it *DeletedIterator) AtT() int64 {
|
|
|
|
return it.Iter.AtT()
|
2021-06-29 07:38:46 -07:00
|
|
|
}
|
|
|
|
|
2021-11-28 23:54:23 -08:00
|
|
|
func (it *DeletedIterator) Seek(t int64) chunkenc.ValueType {
|
2020-11-09 08:51:25 -08:00
|
|
|
if it.Iter.Err() != nil {
|
2021-11-28 23:54:23 -08:00
|
|
|
return chunkenc.ValNone
|
2020-02-06 07:58:38 -08:00
|
|
|
}
|
2021-11-28 23:54:23 -08:00
|
|
|
valueType := it.Iter.Seek(t)
|
|
|
|
if valueType == chunkenc.ValNone {
|
|
|
|
return chunkenc.ValNone
|
2020-04-23 02:00:30 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// Now double check if the entry falls into a deleted interval.
|
2021-11-28 23:54:23 -08:00
|
|
|
ts := it.AtT()
|
2020-11-09 08:51:25 -08:00
|
|
|
for _, itv := range it.Intervals {
|
2020-04-23 02:00:30 -07:00
|
|
|
if ts < itv.Mint {
|
2021-11-28 23:54:23 -08:00
|
|
|
return valueType
|
2020-04-23 02:00:30 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
if ts > itv.Maxt {
|
2020-11-09 08:51:25 -08:00
|
|
|
it.Intervals = it.Intervals[1:]
|
2020-04-23 02:00:30 -07:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// We're in the middle of an interval, we can now call Next().
|
|
|
|
return it.Next()
|
|
|
|
}
|
|
|
|
|
|
|
|
// The timestamp is greater than all the deleted intervals.
|
2021-11-28 23:54:23 -08:00
|
|
|
return valueType
|
2020-02-06 07:58:38 -08:00
|
|
|
}
|
|
|
|
|
2021-11-28 23:54:23 -08:00
|
|
|
func (it *DeletedIterator) Next() chunkenc.ValueType {
|
2017-11-30 06:34:49 -08:00
|
|
|
Outer:
|
2021-11-28 23:54:23 -08:00
|
|
|
for valueType := it.Iter.Next(); valueType != chunkenc.ValNone; valueType = it.Iter.Next() {
|
|
|
|
ts := it.AtT()
|
2020-11-09 08:51:25 -08:00
|
|
|
for _, tr := range it.Intervals {
|
2019-09-19 02:15:41 -07:00
|
|
|
if tr.InBounds(ts) {
|
2017-11-30 06:34:49 -08:00
|
|
|
continue Outer
|
|
|
|
}
|
|
|
|
|
2020-02-06 07:58:38 -08:00
|
|
|
if ts <= tr.Maxt {
|
2021-11-28 23:54:23 -08:00
|
|
|
return valueType
|
2020-02-06 07:58:38 -08:00
|
|
|
}
|
2020-11-09 08:51:25 -08:00
|
|
|
it.Intervals = it.Intervals[1:]
|
2017-11-30 06:34:49 -08:00
|
|
|
}
|
2021-11-28 23:54:23 -08:00
|
|
|
return valueType
|
2017-11-30 06:34:49 -08:00
|
|
|
}
|
2021-11-28 23:54:23 -08:00
|
|
|
return chunkenc.ValNone
|
2017-11-30 06:34:49 -08:00
|
|
|
}
|
|
|
|
|
2020-11-09 08:51:25 -08:00
|
|
|
// Err returns any error from the wrapped iterator.
func (it *DeletedIterator) Err() error {
	return it.Iter.Err()
}
|
2020-10-14 02:06:17 -07:00
|
|
|
|
|
|
|
// nopChunkReader is a ChunkReader that returns the same shared empty chunk
// for every request.
type nopChunkReader struct {
	// emptyChunk is handed out on every ChunkOrIterable call.
	emptyChunk chunkenc.Chunk
}
|
|
|
|
|
|
|
|
func newNopChunkReader() ChunkReader {
|
|
|
|
return nopChunkReader{
|
|
|
|
emptyChunk: chunkenc.NewXORChunk(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-11-28 02:14:29 -08:00
|
|
|
func (cr nopChunkReader) ChunkOrIterable(chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
|
|
|
|
return cr.emptyChunk, nil, nil
|
2021-11-06 03:10:04 -07:00
|
|
|
}
|
2020-10-14 02:06:17 -07:00
|
|
|
|
|
|
|
// Close is a no-op; nopChunkReader holds no resources.
func (cr nopChunkReader) Close() error {
	return nil
}
|