// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tsdb
import (
	"context"
	"math"
	"sort"
	"sync"

	"github.com/go-kit/log/level"
	"github.com/pkg/errors"
	"golang.org/x/exp/slices"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
	"github.com/prometheus/prometheus/tsdb/index"
)
// ExemplarQuerier returns an ExemplarQuerier for the head, delegating to the
// head's exemplar storage.
func (h *Head) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) {
	return h.exemplars.ExemplarQuerier(ctx)
}
// Index returns an IndexReader against the block.
// The reader covers the head's full time range.
func (h *Head) Index() (IndexReader, error) {
	return h.indexRange(math.MinInt64, math.MaxInt64), nil
}
// indexRange returns a headIndexReader restricted to [mint, maxt].
func (h *Head) indexRange(mint, maxt int64) *headIndexReader {
	// Clamp the lower bound: nothing before the head's own minimum exists.
	if low := h.MinTime(); mint < low {
		mint = low
	}
	return &headIndexReader{head: h, mint: mint, maxt: maxt}
}
// headIndexReader implements IndexReader over a restricted time range of the
// head block's data.
type headIndexReader struct {
	head       *Head
	mint, maxt int64
}
// Close is a no-op; the head index reader holds no resources of its own.
func (h *headIndexReader) Close() error {
	return nil
}
func ( h * headIndexReader ) Symbols ( ) index . StringIter {
2021-09-08 02:18:48 -07:00
return h . head . postings . Symbols ( )
2021-08-03 05:14:26 -07:00
}
// SortedLabelValues returns label values present in the head for the
// specific label name that are within the time range mint to maxt.
// If matchers are specified the returned result set is reduced
// to label values of metrics matching the matchers.
func ( h * headIndexReader ) SortedLabelValues ( name string , matchers ... * labels . Matcher ) ( [ ] string , error ) {
values , err := h . LabelValues ( name , matchers ... )
if err == nil {
2022-09-30 07:33:56 -07:00
slices . Sort ( values )
2021-08-03 05:14:26 -07:00
}
return values , err
}
// LabelValues returns label values present in the head for the
// specific label name that are within the time range mint to maxt.
// If matchers are specified the returned result set is reduced
// to label values of metrics matching the matchers.
func (h *headIndexReader) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) {
	// A reader whose range does not overlap the head has no values at all.
	if h.maxt < h.head.MinTime() || h.mint > h.head.MaxTime() {
		return []string{}, nil
	}

	if len(matchers) > 0 {
		return labelValuesWithMatchers(h, name, matchers...)
	}
	return h.head.postings.LabelValues(name), nil
}
// LabelNames returns all the unique label names present in the head
// that are within the time range mint to maxt.
func ( h * headIndexReader ) LabelNames ( matchers ... * labels . Matcher ) ( [ ] string , error ) {
if h . maxt < h . head . MinTime ( ) || h . mint > h . head . MaxTime ( ) {
return [ ] string { } , nil
}
if len ( matchers ) == 0 {
labelNames := h . head . postings . LabelNames ( )
2022-09-30 07:33:56 -07:00
slices . Sort ( labelNames )
2021-08-03 05:14:26 -07:00
return labelNames , nil
}
return labelNamesWithMatchers ( h , matchers ... )
}
// Postings returns the postings list iterator for the label pairs.
func ( h * headIndexReader ) Postings ( name string , values ... string ) ( index . Postings , error ) {
Label values with matchers by intersecting postings (#9907)
* LabelValues w/matchers by intersecting postings
Instead of iterating all matched series to find the values, this
checks if each one of the label values is present in the matched series
(postings).
Pending to be benchmarked.
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Benchmark labelValuesWithMatchers
name old time/op new time/op
Querier/Head/labelValuesWithMatchers/i_with_n="1" 157ms ± 0% 48ms ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 1.80s ± 0% 0.46s ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 144ms ± 0% 57ms ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 304ms ± 0% 111ms ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 761ms ± 0% 164ms ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 6.11µs ± 0% 6.62µs ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 117ms ± 0% 62ms ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 1.44s ± 0% 0.24s ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 92.1ms ± 0% 70.3ms ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 196ms ± 0% 115ms ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 1.23s ± 0% 0.21s ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 1.06ms ± 0% 0.88ms ± 0%
name old alloc/op new alloc/op
Querier/Head/labelValuesWithMatchers/i_with_n="1" 29.5MB ± 0% 26.9MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 46.8MB ± 0% 251.5MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 29.5MB ± 0% 22.3MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 46.8MB ± 0% 23.9MB ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 10.3kB ± 0% 138535.2kB ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 5.54kB ± 0% 7.09kB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 39.1MB ± 0% 28.5MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 287MB ± 0% 253MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 34.3MB ± 0% 23.9MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 51.6MB ± 0% 25.5MB ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 144MB ± 0% 139MB ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 6.43kB ± 0% 8.66kB ± 0%
name old allocs/op new allocs/op
Querier/Head/labelValuesWithMatchers/i_with_n="1" 104k ± 0% 500k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 204k ± 0% 600k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 104k ± 0% 500k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 204k ± 0% 500k ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 66.0 ± 0% 255.0 ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 61.0 ± 0% 205.0 ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 304k ± 0% 600k ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 5.20M ± 0% 0.70M ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 204k ± 0% 600k ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 304k ± 0% 600k ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 3.00M ± 0% 0.00M ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 61.0 ± 0% 247.0 ± 0%
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Don't expand postings to intersect them
Using a min heap we can check whether matched postings intersect with
each one of the label values postings. This avoid expanding postings
(and thus having all of them in memory at any point).
Slightly slower than the expanding postings version for some cases, but
definitely pays the price once the cardinality grows.
Still offers 10x latency improvement where previous latencies were
reaching 1s.
Benchmark results:
name \ time/op old.txt intersect.txt intersect_noexpand.txt
Querier/Head/labelValuesWithMatchers/i_with_n="1" 157ms ± 0% 48ms ± 0% 110ms ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 1.80s ± 0% 0.46s ± 0% 0.18s ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 144ms ± 0% 57ms ± 0% 125ms ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 304ms ± 0% 111ms ± 0% 177ms ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 761ms ± 0% 164ms ± 0% 134ms ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 6.11µs ± 0% 6.62µs ± 0% 4.29µs ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 117ms ± 0% 62ms ± 0% 120ms ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 1.44s ± 0% 0.24s ± 0% 0.15s ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 92.1ms ± 0% 70.3ms ± 0% 125.4ms ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 196ms ± 0% 115ms ± 0% 170ms ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 1.23s ± 0% 0.21s ± 0% 0.14s ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 1.06ms ± 0% 0.88ms ± 0% 0.92ms ± 0%
name \ alloc/op old.txt intersect.txt intersect_noexpand.txt
Querier/Head/labelValuesWithMatchers/i_with_n="1" 29.5MB ± 0% 26.9MB ± 0% 19.1MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 46.8MB ± 0% 251.5MB ± 0% 36.3MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 29.5MB ± 0% 22.3MB ± 0% 19.1MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 46.8MB ± 0% 23.9MB ± 0% 20.7MB ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 10.3kB ± 0% 138535.2kB ± 0% 6.4kB ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 5.54kB ± 0% 7.09kB ± 0% 4.30kB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 39.1MB ± 0% 28.5MB ± 0% 20.7MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 287MB ± 0% 253MB ± 0% 38MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 34.3MB ± 0% 23.9MB ± 0% 20.7MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 51.6MB ± 0% 25.5MB ± 0% 22.3MB ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 144MB ± 0% 139MB ± 0% 0MB ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 6.43kB ± 0% 8.66kB ± 0% 5.86kB ± 0%
name \ allocs/op old.txt intersect.txt intersect_noexpand.txt
Querier/Head/labelValuesWithMatchers/i_with_n="1" 104k ± 0% 500k ± 0% 300k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 204k ± 0% 600k ± 0% 400k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 104k ± 0% 500k ± 0% 300k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 204k ± 0% 500k ± 0% 300k ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 66.0 ± 0% 255.0 ± 0% 139.0 ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 61.0 ± 0% 205.0 ± 0% 87.0 ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 304k ± 0% 600k ± 0% 400k ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 5.20M ± 0% 0.70M ± 0% 0.50M ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 204k ± 0% 600k ± 0% 400k ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 304k ± 0% 600k ± 0% 400k ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 3.00M ± 0% 0.00M ± 0% 0.00M ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 61.0 ± 0% 247.0 ± 0% 129.0 ± 0%
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Apply comment suggestions from the code review
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Ganesh Vernekar <15064823+codesome@users.noreply.github.com>
* Change else { if } to else if
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Remove sorting of label values
We were not sorting them before, so no need to sort them now
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Ganesh Vernekar <15064823+codesome@users.noreply.github.com>
2021-12-28 06:59:03 -08:00
switch len ( values ) {
case 0 :
return index . EmptyPostings ( ) , nil
case 1 :
return h . head . postings . Get ( name , values [ 0 ] ) , nil
default :
res := make ( [ ] index . Postings , 0 , len ( values ) )
for _ , value := range values {
2023-01-10 01:51:49 -08:00
if p := h . head . postings . Get ( name , value ) ; ! index . IsEmptyPostingsType ( p ) {
res = append ( res , p )
}
Label values with matchers by intersecting postings (#9907)
* LabelValues w/matchers by intersecting postings
Instead of iterating all matched series to find the values, this
checks if each one of the label values is present in the matched series
(postings).
Pending to be benchmarked.
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Benchmark labelValuesWithMatchers
name old time/op new time/op
Querier/Head/labelValuesWithMatchers/i_with_n="1" 157ms ± 0% 48ms ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 1.80s ± 0% 0.46s ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 144ms ± 0% 57ms ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 304ms ± 0% 111ms ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 761ms ± 0% 164ms ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 6.11µs ± 0% 6.62µs ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 117ms ± 0% 62ms ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 1.44s ± 0% 0.24s ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 92.1ms ± 0% 70.3ms ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 196ms ± 0% 115ms ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 1.23s ± 0% 0.21s ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 1.06ms ± 0% 0.88ms ± 0%
name old alloc/op new alloc/op
Querier/Head/labelValuesWithMatchers/i_with_n="1" 29.5MB ± 0% 26.9MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 46.8MB ± 0% 251.5MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 29.5MB ± 0% 22.3MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 46.8MB ± 0% 23.9MB ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 10.3kB ± 0% 138535.2kB ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 5.54kB ± 0% 7.09kB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 39.1MB ± 0% 28.5MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 287MB ± 0% 253MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 34.3MB ± 0% 23.9MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 51.6MB ± 0% 25.5MB ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 144MB ± 0% 139MB ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 6.43kB ± 0% 8.66kB ± 0%
name old allocs/op new allocs/op
Querier/Head/labelValuesWithMatchers/i_with_n="1" 104k ± 0% 500k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 204k ± 0% 600k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 104k ± 0% 500k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 204k ± 0% 500k ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 66.0 ± 0% 255.0 ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 61.0 ± 0% 205.0 ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 304k ± 0% 600k ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 5.20M ± 0% 0.70M ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 204k ± 0% 600k ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 304k ± 0% 600k ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 3.00M ± 0% 0.00M ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 61.0 ± 0% 247.0 ± 0%
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Don't expand postings to intersect them
Using a min heap we can check whether matched postings intersect with
each one of the label values postings. This avoid expanding postings
(and thus having all of them in memory at any point).
Slightly slower than the expanding postings version for some cases, but
definitely pays the price once the cardinality grows.
Still offers 10x latency improvement where previous latencies were
reaching 1s.
Benchmark results:
name \ time/op old.txt intersect.txt intersect_noexpand.txt
Querier/Head/labelValuesWithMatchers/i_with_n="1" 157ms ± 0% 48ms ± 0% 110ms ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 1.80s ± 0% 0.46s ± 0% 0.18s ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 144ms ± 0% 57ms ± 0% 125ms ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 304ms ± 0% 111ms ± 0% 177ms ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 761ms ± 0% 164ms ± 0% 134ms ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 6.11µs ± 0% 6.62µs ± 0% 4.29µs ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 117ms ± 0% 62ms ± 0% 120ms ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 1.44s ± 0% 0.24s ± 0% 0.15s ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 92.1ms ± 0% 70.3ms ± 0% 125.4ms ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 196ms ± 0% 115ms ± 0% 170ms ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 1.23s ± 0% 0.21s ± 0% 0.14s ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 1.06ms ± 0% 0.88ms ± 0% 0.92ms ± 0%
name \ alloc/op old.txt intersect.txt intersect_noexpand.txt
Querier/Head/labelValuesWithMatchers/i_with_n="1" 29.5MB ± 0% 26.9MB ± 0% 19.1MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 46.8MB ± 0% 251.5MB ± 0% 36.3MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 29.5MB ± 0% 22.3MB ± 0% 19.1MB ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 46.8MB ± 0% 23.9MB ± 0% 20.7MB ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 10.3kB ± 0% 138535.2kB ± 0% 6.4kB ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 5.54kB ± 0% 7.09kB ± 0% 4.30kB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 39.1MB ± 0% 28.5MB ± 0% 20.7MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 287MB ± 0% 253MB ± 0% 38MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 34.3MB ± 0% 23.9MB ± 0% 20.7MB ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 51.6MB ± 0% 25.5MB ± 0% 22.3MB ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 144MB ± 0% 139MB ± 0% 0MB ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 6.43kB ± 0% 8.66kB ± 0% 5.86kB ± 0%
name \ allocs/op old.txt intersect.txt intersect_noexpand.txt
Querier/Head/labelValuesWithMatchers/i_with_n="1" 104k ± 0% 500k ± 0% 300k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="^.+$" 204k ± 0% 600k ± 0% 400k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",j!="foo" 104k ± 0% 500k ± 0% 300k ± 0%
Querier/Head/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 204k ± 0% 500k ± 0% 300k ± 0%
Querier/Head/labelValuesWithMatchers/n_with_j!="foo" 66.0 ± 0% 255.0 ± 0% 139.0 ± 0%
Querier/Head/labelValuesWithMatchers/n_with_i="1" 61.0 ± 0% 205.0 ± 0% 87.0 ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1" 304k ± 0% 600k ± 0% 400k ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="^.+$" 5.20M ± 0% 0.70M ± 0% 0.50M ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",j!="foo" 204k ± 0% 600k ± 0% 400k ± 0%
Querier/Block/labelValuesWithMatchers/i_with_n="1",i=~"^.*$",j!="foo" 304k ± 0% 600k ± 0% 400k ± 0%
Querier/Block/labelValuesWithMatchers/n_with_j!="foo" 3.00M ± 0% 0.00M ± 0% 0.00M ± 0%
Querier/Block/labelValuesWithMatchers/n_with_i="1" 61.0 ± 0% 247.0 ± 0% 129.0 ± 0%
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Apply comment suggestions from the code review
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Ganesh Vernekar <15064823+codesome@users.noreply.github.com>
* Change else { if } to else if
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Remove sorting of label values
We were not sorting them before, so no need to sort them now
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Ganesh Vernekar <15064823+codesome@users.noreply.github.com>
2021-12-28 06:59:03 -08:00
}
return index . Merge ( res ... ) , nil
2021-08-03 05:14:26 -07:00
}
}
func ( h * headIndexReader ) SortedPostings ( p index . Postings ) index . Postings {
series := make ( [ ] * memSeries , 0 , 128 )
// Fetch all the series only once.
for p . Next ( ) {
2021-11-06 03:10:04 -07:00
s := h . head . series . getByID ( chunks . HeadSeriesRef ( p . At ( ) ) )
2021-08-03 05:14:26 -07:00
if s == nil {
level . Debug ( h . head . logger ) . Log ( "msg" , "Looked up series not found" )
} else {
series = append ( series , s )
}
}
if err := p . Err ( ) ; err != nil {
return index . ErrPostings ( errors . Wrap ( err , "expand postings" ) )
}
sort . Slice ( series , func ( i , j int ) bool {
return labels . Compare ( series [ i ] . lset , series [ j ] . lset ) < 0
} )
// Convert back to list.
2021-11-06 03:10:04 -07:00
ep := make ( [ ] storage . SeriesRef , 0 , len ( series ) )
2021-08-03 05:14:26 -07:00
for _ , p := range series {
2021-11-06 03:10:04 -07:00
ep = append ( ep , storage . SeriesRef ( p . ref ) )
2021-08-03 05:14:26 -07:00
}
return index . NewListPostings ( ep )
}
// Series returns the series for the given reference.
2022-12-15 10:19:15 -08:00
func ( h * headIndexReader ) Series ( ref storage . SeriesRef , builder * labels . ScratchBuilder , chks * [ ] chunks . Meta ) error {
2021-11-06 03:10:04 -07:00
s := h . head . series . getByID ( chunks . HeadSeriesRef ( ref ) )
2021-08-03 05:14:26 -07:00
if s == nil {
h . head . metrics . seriesNotFound . Inc ( )
return storage . ErrNotFound
}
2022-12-15 10:19:15 -08:00
builder . Assign ( s . lset )
2021-08-03 05:14:26 -07:00
s . Lock ( )
defer s . Unlock ( )
* chks = ( * chks ) [ : 0 ]
for i , c := range s . mmappedChunks {
// Do not expose chunks that are outside of the specified range.
if ! c . OverlapsClosedInterval ( h . mint , h . maxt ) {
continue
}
* chks = append ( * chks , chunks . Meta {
MinTime : c . minTime ,
MaxTime : c . maxTime ,
2021-11-17 05:05:10 -08:00
Ref : chunks . ChunkRef ( chunks . NewHeadChunkRef ( s . ref , s . headChunkID ( i ) ) ) ,
2021-08-03 05:14:26 -07:00
} )
}
if s . headChunk != nil && s . headChunk . OverlapsClosedInterval ( h . mint , h . maxt ) {
* chks = append ( * chks , chunks . Meta {
MinTime : s . headChunk . minTime ,
MaxTime : math . MaxInt64 , // Set the head chunks as open (being appended to).
2021-11-17 05:05:10 -08:00
Ref : chunks . ChunkRef ( chunks . NewHeadChunkRef ( s . ref , s . headChunkID ( len ( s . mmappedChunks ) ) ) ) ,
2021-08-03 05:14:26 -07:00
} )
}
return nil
}
2022-09-20 10:05:50 -07:00
// headChunkID returns the HeadChunkID referred to by the given position.
// * 0 <= pos < len(s.mmappedChunks) refer to s.mmappedChunks[pos]
// * pos == len(s.mmappedChunks) refers to s.headChunk
2021-11-17 05:05:10 -08:00
func ( s * memSeries ) headChunkID ( pos int ) chunks . HeadChunkID {
return chunks . HeadChunkID ( pos ) + s . firstChunkID
2021-08-03 05:14:26 -07:00
}
2022-09-20 10:05:50 -07:00
// oooHeadChunkID returns the HeadChunkID referred to by the given position.
// * 0 <= pos < len(s.oooMmappedChunks) refer to s.oooMmappedChunks[pos]
// * pos == len(s.oooMmappedChunks) refers to s.oooHeadChunk
2022-12-28 02:19:41 -08:00
// The caller must ensure that s.ooo is not nil.
2022-09-20 10:05:50 -07:00
func ( s * memSeries ) oooHeadChunkID ( pos int ) chunks . HeadChunkID {
2022-12-28 02:19:41 -08:00
return chunks . HeadChunkID ( pos ) + s . ooo . firstOOOChunkID
2022-09-20 10:05:50 -07:00
}
2021-08-03 05:14:26 -07:00
// LabelValueFor returns label value for the given label name in the series referred to by ID.
2021-11-06 03:10:04 -07:00
func ( h * headIndexReader ) LabelValueFor ( id storage . SeriesRef , label string ) ( string , error ) {
memSeries := h . head . series . getByID ( chunks . HeadSeriesRef ( id ) )
2021-08-03 05:14:26 -07:00
if memSeries == nil {
return "" , storage . ErrNotFound
}
value := memSeries . lset . Get ( label )
if value == "" {
return "" , storage . ErrNotFound
}
return value , nil
}
// LabelNamesFor returns all the label names for the series referred to by IDs.
// The names returned are sorted.
2021-11-06 03:10:04 -07:00
func ( h * headIndexReader ) LabelNamesFor ( ids ... storage . SeriesRef ) ( [ ] string , error ) {
2021-08-03 05:14:26 -07:00
namesMap := make ( map [ string ] struct { } )
for _ , id := range ids {
2021-11-06 03:10:04 -07:00
memSeries := h . head . series . getByID ( chunks . HeadSeriesRef ( id ) )
2021-08-03 05:14:26 -07:00
if memSeries == nil {
return nil , storage . ErrNotFound
}
2022-03-09 14:17:40 -08:00
memSeries . lset . Range ( func ( lbl labels . Label ) {
2021-08-03 05:14:26 -07:00
namesMap [ lbl . Name ] = struct { } { }
2022-03-09 14:17:40 -08:00
} )
2021-08-03 05:14:26 -07:00
}
names := make ( [ ] string , 0 , len ( namesMap ) )
for name := range namesMap {
names = append ( names , name )
}
2022-09-30 07:33:56 -07:00
slices . Sort ( names )
2021-08-03 05:14:26 -07:00
return names , nil
}
// Chunks returns a ChunkReader against the block, spanning the head's full
// time range with a fresh isolation state.
func (h *Head) Chunks() (ChunkReader, error) {
	return h.chunksRange(math.MinInt64, math.MaxInt64, h.iso.State(math.MinInt64, math.MaxInt64))
}
// chunksRange returns a headChunkReader restricted to [mint, maxt] carrying
// the given isolation state. It fails if the head has already been closed.
func (h *Head) chunksRange(mint, maxt int64, is *isolationState) (*headChunkReader, error) {
	h.closedMtx.Lock()
	defer h.closedMtx.Unlock()

	if h.closed {
		return nil, errors.New("can't read from a closed head")
	}

	// Clamp the lower bound to the head's own minimum time.
	if low := h.MinTime(); mint < low {
		mint = low
	}

	return &headChunkReader{
		head:     h,
		mint:     mint,
		maxt:     maxt,
		isoState: is,
	}, nil
}
// headChunkReader implements ChunkReader over a restricted time range of the
// head block, honoring the isolation state it was created with.
type headChunkReader struct {
	head       *Head
	mint, maxt int64
	isoState   *isolationState
}
func ( h * headChunkReader ) Close ( ) error {
2022-09-27 07:01:23 -07:00
if h . isoState != nil {
h . isoState . Close ( )
}
2021-08-03 05:14:26 -07:00
return nil
}
// Chunk returns the chunk for the reference number.
2022-09-20 10:05:50 -07:00
func ( h * headChunkReader ) Chunk ( meta chunks . Meta ) ( chunkenc . Chunk , error ) {
2023-02-19 09:34:51 -08:00
chk , _ , err := h . chunk ( meta , false )
return chk , err
}
// ChunkWithCopy returns the chunk for the reference number.
// If the chunk is the in-memory chunk, then it makes a copy and returns the copied chunk.
func (h *headChunkReader) ChunkWithCopy(meta chunks.Meta) (chunkenc.Chunk, int64, error) {
	return h.chunk(meta, true)
}
// chunk returns the chunk for the reference number.
// If copyLastChunk is true, then it makes a copy of the head chunk if asked for it.
// Also returns max time of the chunk.
func ( h * headChunkReader ) chunk ( meta chunks . Meta , copyLastChunk bool ) ( chunkenc . Chunk , int64 , error ) {
2022-09-20 10:05:50 -07:00
sid , cid := chunks . HeadChunkRef ( meta . Ref ) . Unpack ( )
2021-08-03 05:14:26 -07:00
s := h . head . series . getByID ( sid )
// This means that the series has been garbage collected.
if s == nil {
2023-02-19 09:34:51 -08:00
return nil , 0 , storage . ErrNotFound
2021-08-03 05:14:26 -07:00
}
s . Lock ( )
2023-02-19 09:34:51 -08:00
c , headChunk , err := s . chunk ( cid , h . head . chunkDiskMapper , & h . head . memChunkPool )
2021-08-03 05:14:26 -07:00
if err != nil {
s . Unlock ( )
2023-02-19 09:34:51 -08:00
return nil , 0 , err
2021-08-03 05:14:26 -07:00
}
defer func ( ) {
2023-02-19 09:34:51 -08:00
if ! headChunk {
2021-08-03 05:14:26 -07:00
// Set this to nil so that Go GC can collect it after it has been used.
c . chunk = nil
2022-09-15 00:52:09 -07:00
h . head . memChunkPool . Put ( c )
2021-08-03 05:14:26 -07:00
}
} ( )
// This means that the chunk is outside the specified range.
if ! c . OverlapsClosedInterval ( h . mint , h . maxt ) {
s . Unlock ( )
2023-02-19 09:34:51 -08:00
return nil , 0 , storage . ErrNotFound
}
chk , maxTime := c . chunk , c . maxTime
if headChunk && copyLastChunk {
// The caller may ask to copy the head chunk in order to take the
// bytes of the chunk without causing the race between read and append.
b := s . headChunk . chunk . Bytes ( )
newB := make ( [ ] byte , len ( b ) )
copy ( newB , b ) // TODO(codesome): Use bytes.Clone() when we upgrade to Go 1.20.
// TODO(codesome): Put back in the pool (non-trivial).
chk , err = h . head . opts . ChunkPool . Get ( s . headChunk . chunk . Encoding ( ) , newB )
if err != nil {
return nil , 0 , err
}
2021-08-03 05:14:26 -07:00
}
s . Unlock ( )
2023-05-19 01:24:06 -07:00
return & safeHeadChunk {
2023-02-19 09:34:51 -08:00
Chunk : chk ,
2023-02-21 01:30:11 -08:00
s : s ,
cid : cid ,
isoState : h . isoState ,
2023-02-19 09:34:51 -08:00
} , maxTime , nil
2021-08-03 05:14:26 -07:00
}
2021-11-17 05:05:10 -08:00
// chunk returns the chunk for the HeadChunkID from memory or by m-mapping it from the disk.
2023-03-21 02:45:36 -07:00
// If headChunk is false, it means that the returned *memChunk
2021-11-17 02:21:27 -08:00
// (and not the chunkenc.Chunk inside it) can be garbage collected after its usage.
2023-02-19 09:34:51 -08:00
func ( s * memSeries ) chunk ( id chunks . HeadChunkID , chunkDiskMapper * chunks . ChunkDiskMapper , memChunkPool * sync . Pool ) ( chunk * memChunk , headChunk bool , err error ) {
2021-08-03 05:14:26 -07:00
// ix represents the index of chunk in the s.mmappedChunks slice. The chunk id's are
// incremented by 1 when new chunk is created, hence (id - firstChunkID) gives the slice index.
// The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix
// is len(s.mmappedChunks), it represents the next chunk, which is the head chunk.
2021-11-17 05:05:10 -08:00
ix := int ( id ) - int ( s . firstChunkID )
2021-08-03 05:14:26 -07:00
if ix < 0 || ix > len ( s . mmappedChunks ) {
return nil , false , storage . ErrNotFound
}
2023-02-19 09:34:51 -08:00
2021-08-03 05:14:26 -07:00
if ix == len ( s . mmappedChunks ) {
if s . headChunk == nil {
return nil , false , errors . New ( "invalid head chunk" )
}
2023-02-19 09:34:51 -08:00
return s . headChunk , true , nil
2021-08-03 05:14:26 -07:00
}
chk , err := chunkDiskMapper . Chunk ( s . mmappedChunks [ ix ] . ref )
if err != nil {
if _ , ok := err . ( * chunks . CorruptionErr ) ; ok {
panic ( err )
}
return nil , false , err
}
2022-09-15 00:52:09 -07:00
mc := memChunkPool . Get ( ) . ( * memChunk )
2021-08-03 05:14:26 -07:00
mc . chunk = chk
mc . minTime = s . mmappedChunks [ ix ] . minTime
mc . maxTime = s . mmappedChunks [ ix ] . maxTime
2023-02-19 09:34:51 -08:00
return mc , false , nil
2021-08-03 05:14:26 -07:00
}
// oooMergedChunk returns the requested chunk based on the given chunks.Meta
// reference from memory or by m-mapping it from the disk. The returned chunk
// might be a merge of all the overlapping chunks, if any, amongst all the
// chunks in the OOOHead.
// This function is not thread safe unless the caller holds a lock.
// The caller must ensure that s.ooo is not nil.
func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, mint, maxt int64) (chunk *mergedOOOChunks, err error) {
	_, cid := chunks.HeadChunkRef(meta.Ref).Unpack()

	// ix represents the index of chunk in the s.mmappedChunks slice. The chunk meta's are
	// incremented by 1 when new chunk is created, hence (meta - firstChunkID) gives the slice index.
	// The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix
	// is len(s.mmappedChunks), it represents the next chunk, which is the head chunk.
	ix := int(cid) - int(s.ooo.firstOOOChunkID)
	if ix < 0 || ix > len(s.ooo.oooMmappedChunks) {
		return nil, storage.ErrNotFound
	}

	// ix == len(oooMmappedChunks) addresses the (not yet m-mapped) head chunk;
	// in that case the head chunk must actually exist.
	if ix == len(s.ooo.oooMmappedChunks) {
		if s.ooo.oooHeadChunk == nil {
			return nil, errors.New("invalid ooo head chunk")
		}
	}

	// We create a temporary slice of chunk metas to hold the information of all
	// possible chunks that may overlap with the requested chunk.
	tmpChks := make([]chunkMetaAndChunkDiskMapperRef, 0, len(s.ooo.oooMmappedChunks))

	oooHeadRef := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks))))
	if s.ooo.oooHeadChunk != nil && s.ooo.oooHeadChunk.OverlapsClosedInterval(mint, maxt) {
		// We only want to append the head chunk if this chunk existed when
		// Series() was called. This brings consistency in case new data
		// is added in between Series() and Chunk() calls.
		if oooHeadRef == meta.OOOLastRef {
			tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
				meta: chunks.Meta{
					// Ignoring samples added before and after the last known min and max time for this chunk.
					MinTime: meta.OOOLastMinTime,
					MaxTime: meta.OOOLastMaxTime,
					Ref:     oooHeadRef,
				},
			})
		}
	}

	// Collect all m-mapped OOO chunks that may overlap the request.
	for i, c := range s.ooo.oooMmappedChunks {
		chunkRef := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i)))
		// We can skip chunks that came in later than the last known OOOLastRef.
		if chunkRef > meta.OOOLastRef {
			break
		}

		switch {
		case chunkRef == meta.OOOLastRef:
			// This chunk was the head chunk when Series() was called; clamp it
			// to the min/max markers recorded at that time.
			tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
				meta: chunks.Meta{
					MinTime: meta.OOOLastMinTime,
					MaxTime: meta.OOOLastMaxTime,
					Ref:     chunkRef,
				},
				ref:      c.ref,
				origMinT: c.minTime,
				origMaxT: c.maxTime,
			})
		case c.OverlapsClosedInterval(mint, maxt):
			tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
				meta: chunks.Meta{
					MinTime: c.minTime,
					MaxTime: c.maxTime,
					Ref:     chunkRef,
				},
				ref: c.ref,
			})
		}
	}

	// Next we want to sort all the collected chunks by min time so we can find
	// those that overlap and stop when we know the rest don't.
	sort.Sort(byMinTimeAndMinRef(tmpChks))

	// Walk the sorted metas and materialize every chunk that is either the
	// requested one or transitively overlaps it, tracking the running max time
	// (absoluteMax) so we can stop extending the overlap set.
	mc := &mergedOOOChunks{}
	absoluteMax := int64(math.MinInt64)
	for _, c := range tmpChks {
		if c.meta.Ref != meta.Ref && (len(mc.chunks) == 0 || c.meta.MinTime > absoluteMax) {
			continue
		}
		if c.meta.Ref == oooHeadRef {
			var xor *chunkenc.XORChunk
			// If head chunk min and max time match the meta OOO markers
			// that means that the chunk has not expanded so we can append
			// it as it is.
			if s.ooo.oooHeadChunk.minTime == meta.OOOLastMinTime && s.ooo.oooHeadChunk.maxTime == meta.OOOLastMaxTime {
				xor, err = s.ooo.oooHeadChunk.chunk.ToXOR() // TODO(jesus.vazquez) (This is an optimization idea that has no priority and might not be that useful) See if we could use a copy of the underlying slice. That would leave the more expensive ToXOR() function only for the usecase where Bytes() is called.
			} else {
				// We need to remove samples that are outside of the markers
				xor, err = s.ooo.oooHeadChunk.chunk.ToXORBetweenTimestamps(meta.OOOLastMinTime, meta.OOOLastMaxTime)
			}
			if err != nil {
				return nil, errors.Wrap(err, "failed to convert ooo head chunk to xor chunk")
			}
			c.meta.Chunk = xor
		} else {
			chk, err := cdm.Chunk(c.ref)
			if err != nil {
				if _, ok := err.(*chunks.CorruptionErr); ok {
					return nil, errors.Wrap(err, "invalid ooo mmapped chunk")
				}
				return nil, err
			}
			if c.meta.Ref == meta.OOOLastRef &&
				(c.origMinT != meta.OOOLastMinTime || c.origMaxT != meta.OOOLastMaxTime) {
				// The head expanded and was memory mapped so now we need to
				// wrap the chunk within a chunk that doesn't allow us to iterate
				// through samples out of the OOOLastMinT and OOOLastMaxT
				// markers.
				c.meta.Chunk = boundedChunk{chk, meta.OOOLastMinTime, meta.OOOLastMaxTime}
			} else {
				c.meta.Chunk = chk
			}
		}
		mc.chunks = append(mc.chunks, c.meta)
		if c.meta.MaxTime > absoluteMax {
			absoluteMax = c.meta.MaxTime
		}
	}

	return mc, nil
}
var _ chunkenc.Chunk = &mergedOOOChunks{}

// mergedOOOChunks holds the list of overlapping chunks. This struct satisfies
// chunkenc.Chunk.
type mergedOOOChunks struct {
	// chunks are the (possibly overlapping) chunk metas whose samples are
	// merged in timestamp order by Iterator.
	chunks []chunks.Meta
}
// Bytes is a very expensive method because its calling the iterator of all the
// chunks in the mergedOOOChunk and building a new chunk with the samples.
func ( o mergedOOOChunks ) Bytes ( ) [ ] byte {
xc := chunkenc . NewXORChunk ( )
app , err := xc . Appender ( )
if err != nil {
panic ( err )
}
it := o . Iterator ( nil )
2022-10-11 09:35:35 -07:00
for it . Next ( ) == chunkenc . ValFloat {
2022-09-20 10:05:50 -07:00
t , v := it . At ( )
app . Append ( t , v )
}
return xc . Bytes ( )
}
// Encoding returns the encoding of the merged view; samples are re-encoded
// as XOR (see Bytes).
func (o mergedOOOChunks) Encoding() chunkenc.Encoding {
	return chunkenc.EncXOR
}
// Appender always fails: mergedOOOChunks is a read-only merged view.
func (o mergedOOOChunks) Appender() (chunkenc.Appender, error) {
	return nil, errors.New("can't append to mergedOOOChunks")
}
// Iterator returns an iterator that chains the samples of all the underlying
// chunk metas in timestamp order.
func (o mergedOOOChunks) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator {
	return storage.ChainSampleIteratorFromMetas(iterator, o.chunks)
}
// NumSamples returns the total number of samples across all merged chunks.
func (o mergedOOOChunks) NumSamples() int {
	total := 0
	for i := range o.chunks {
		total += o.chunks[i].Chunk.NumSamples()
	}
	return total
}
// Compact is a no-op for the read-only merged view.
func (o mergedOOOChunks) Compact() {}
var _ chunkenc.Chunk = &boundedChunk{}

// boundedChunk is an implementation of chunkenc.Chunk that uses a
// boundedIterator that only iterates through samples which timestamps are
// >= minT and <= maxT
type boundedChunk struct {
	chunkenc.Chunk
	minT int64 // Inclusive lower timestamp bound.
	maxT int64 // Inclusive upper timestamp bound.
}
func ( b boundedChunk ) Bytes ( ) [ ] byte {
xor := chunkenc . NewXORChunk ( )
a , _ := xor . Appender ( )
it := b . Iterator ( nil )
2022-10-11 09:35:35 -07:00
for it . Next ( ) == chunkenc . ValFloat {
2022-09-20 10:05:50 -07:00
t , v := it . At ( )
a . Append ( t , v )
}
return xor . Bytes ( )
}
// Iterator wraps the underlying chunk's iterator in a boundedIterator that
// clamps iteration to [minT, maxT].
func (b boundedChunk) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator {
	inner := b.Chunk.Iterator(iterator)
	if inner == nil {
		panic("iterator shouldn't be nil")
	}
	return boundedIterator{inner, b.minT, b.maxT}
}
var _ chunkenc.Iterator = &boundedIterator{}

// boundedIterator is an implementation of Iterator that only iterates through
// samples which timestamps are >= minT and <= maxT
type boundedIterator struct {
	chunkenc.Iterator
	minT int64 // Inclusive lower timestamp bound.
	maxT int64 // Inclusive upper timestamp bound.
}
// Next the first time its called it will advance as many positions as necessary
// until its able to find a sample within the bounds minT and maxT.
// If there are samples within bounds it will advance one by one amongst them.
// If there are no samples within bounds it will return false.
2022-10-11 09:35:35 -07:00
func ( b boundedIterator ) Next ( ) chunkenc . ValueType {
for b . Iterator . Next ( ) == chunkenc . ValFloat {
2022-09-20 10:05:50 -07:00
t , _ := b . Iterator . At ( )
style: Replace `else if` cascades with `switch`
Wiser coders than myself have come to the conclusion that a `switch`
statement is almost always superior to a statement that includes any
`else if`.
The exceptions that I have found in our codebase are just these two:
* The `if else` is followed by an additional statement before the next
condition (separated by a `;`).
* The whole thing is within a `for` loop and `break` statements are
used. In this case, using `switch` would require tagging the `for`
loop, which probably tips the balance.
Why are `switch` statements more readable?
For one, fewer curly braces. But more importantly, the conditions all
have the same alignment, so the whole thing follows the natural flow
of going down a list of conditions. With `else if`, in contrast, all
conditions but the first are "hidden" behind `} else if `, harder to
spot and (for no good reason) presented differently from the first
condition.
I'm sure the aforemention wise coders can list even more reasons.
In any case, I like it so much that I have found myself recommending
it in code reviews. I would like to make it a habit in our code base,
without making it a hard requirement that we would test on the CI. But
for that, there has to be a role model, so this commit eliminates all
`if else` occurrences, unless it is autogenerated code or fits one of
the exceptions above.
Signed-off-by: beorn7 <beorn@grafana.com>
2023-04-12 07:14:31 -07:00
switch {
case t < b . minT :
2022-09-20 10:05:50 -07:00
continue
style: Replace `else if` cascades with `switch`
Wiser coders than myself have come to the conclusion that a `switch`
statement is almost always superior to a statement that includes any
`else if`.
The exceptions that I have found in our codebase are just these two:
* The `if else` is followed by an additional statement before the next
condition (separated by a `;`).
* The whole thing is within a `for` loop and `break` statements are
used. In this case, using `switch` would require tagging the `for`
loop, which probably tips the balance.
Why are `switch` statements more readable?
For one, fewer curly braces. But more importantly, the conditions all
have the same alignment, so the whole thing follows the natural flow
of going down a list of conditions. With `else if`, in contrast, all
conditions but the first are "hidden" behind `} else if `, harder to
spot and (for no good reason) presented differently from the first
condition.
I'm sure the aforemention wise coders can list even more reasons.
In any case, I like it so much that I have found myself recommending
it in code reviews. I would like to make it a habit in our code base,
without making it a hard requirement that we would test on the CI. But
for that, there has to be a role model, so this commit eliminates all
`if else` occurrences, unless it is autogenerated code or fits one of
the exceptions above.
Signed-off-by: beorn7 <beorn@grafana.com>
2023-04-12 07:14:31 -07:00
case t > b . maxT :
2022-10-11 09:35:35 -07:00
return chunkenc . ValNone
style: Replace `else if` cascades with `switch`
Wiser coders than myself have come to the conclusion that a `switch`
statement is almost always superior to a statement that includes any
`else if`.
The exceptions that I have found in our codebase are just these two:
* The `if else` is followed by an additional statement before the next
condition (separated by a `;`).
* The whole thing is within a `for` loop and `break` statements are
used. In this case, using `switch` would require tagging the `for`
loop, which probably tips the balance.
Why are `switch` statements more readable?
For one, fewer curly braces. But more importantly, the conditions all
have the same alignment, so the whole thing follows the natural flow
of going down a list of conditions. With `else if`, in contrast, all
conditions but the first are "hidden" behind `} else if `, harder to
spot and (for no good reason) presented differently from the first
condition.
I'm sure the aforemention wise coders can list even more reasons.
In any case, I like it so much that I have found myself recommending
it in code reviews. I would like to make it a habit in our code base,
without making it a hard requirement that we would test on the CI. But
for that, there has to be a role model, so this commit eliminates all
`if else` occurrences, unless it is autogenerated code or fits one of
the exceptions above.
Signed-off-by: beorn7 <beorn@grafana.com>
2023-04-12 07:14:31 -07:00
default :
return chunkenc . ValFloat
2022-09-20 10:05:50 -07:00
}
}
2022-10-11 09:35:35 -07:00
return chunkenc . ValNone
2022-09-20 10:05:50 -07:00
}
2022-10-11 09:35:35 -07:00
func ( b boundedIterator ) Seek ( t int64 ) chunkenc . ValueType {
2022-09-20 10:05:50 -07:00
if t < b . minT {
// We must seek at least up to b.minT if it is asked for something before that.
2022-10-11 09:35:35 -07:00
val := b . Iterator . Seek ( b . minT )
if ! ( val == chunkenc . ValFloat ) {
return chunkenc . ValNone
2022-09-20 10:05:50 -07:00
}
t , _ := b . Iterator . At ( )
2022-10-11 09:35:35 -07:00
if t <= b . maxT {
return chunkenc . ValFloat
}
2022-09-20 10:05:50 -07:00
}
if t > b . maxT {
// We seek anyway so that the subsequent Next() calls will also return false.
b . Iterator . Seek ( t )
2022-10-11 09:35:35 -07:00
return chunkenc . ValNone
2022-09-20 10:05:50 -07:00
}
return b . Iterator . Seek ( t )
}
// safeHeadChunk makes sure that the chunk can be accessed without a race condition
type safeHeadChunk struct {
	chunkenc.Chunk
	s        *memSeries          // Owning series; its lock guards iterator creation.
	cid      chunks.HeadChunkID  // ID of the wrapped head chunk within the series.
	isoState *isolationState     // Isolation snapshot to apply when iterating.
}
2023-05-19 01:24:06 -07:00
func ( c * safeHeadChunk ) Iterator ( reuseIter chunkenc . Iterator ) chunkenc . Iterator {
2021-08-03 05:14:26 -07:00
c . s . Lock ( )
2023-02-21 01:30:11 -08:00
it := c . s . iterator ( c . cid , c . Chunk , c . isoState , reuseIter )
2021-08-03 05:14:26 -07:00
c . s . Unlock ( )
return it
}
// iterator returns a chunk iterator for the requested chunkID, or a NopIterator if the requested ID is out of range.
// It is unsafe to call this concurrently with s.append(...) without holding the series lock.
func (s *memSeries) iterator(id chunks.HeadChunkID, c chunkenc.Chunk, isoState *isolationState, it chunkenc.Iterator) chunkenc.Iterator {
	// Index of the requested chunk within s.mmappedChunks (len(mmappedChunks)
	// addresses the head chunk).
	ix := int(id) - int(s.firstChunkID)

	numSamples := c.NumSamples()
	// stopAfter limits how many samples of this chunk are visible under the
	// given isolation state; by default all of them are.
	stopAfter := numSamples

	if isoState != nil && !isoState.IsolationDisabled() {
		totalSamples := 0    // Total samples in this series.
		previousSamples := 0 // Samples before this chunk.

		for j, d := range s.mmappedChunks {
			totalSamples += int(d.numSamples)
			if j < ix {
				previousSamples += int(d.numSamples)
			}
		}

		if s.headChunk != nil {
			totalSamples += s.headChunk.chunk.NumSamples()
		}

		// Removing the extra transactionIDs that are relevant for samples that
		// come after this chunk, from the total transactionIDs.
		appendIDsToConsider := s.txs.txIDCount - (totalSamples - (previousSamples + numSamples))

		// Iterate over the appendIDs, find the first one that the isolation state says not
		// to return.
		it := s.txs.iterator()
		for index := 0; index < appendIDsToConsider; index++ {
			appendID := it.At()
			if appendID <= isoState.maxAppendID { // Easy check first.
				if _, ok := isoState.incompleteAppends[appendID]; !ok {
					it.Next()
					continue
				}
			}
			// This appendID must not be visible: cut off the chunk right
			// before the samples it (and any later appendIDs) wrote.
			stopAfter = numSamples - (appendIDsToConsider - index)
			if stopAfter < 0 {
				stopAfter = 0 // Stopped in a previous chunk.
			}
			break
		}
	}

	if stopAfter == 0 {
		return chunkenc.NewNopIterator()
	}
	if stopAfter == numSamples {
		return c.Iterator(it)
	}
	return makeStopIterator(c, it, stopAfter)
}
// stopIterator wraps an Iterator, but only returns the first
// stopAfter values, if initialized with i=-1.
type stopIterator struct {
	chunkenc.Iterator
	i, stopAfter int // i is the index of the last returned sample (-1 before the first Next).
}
2021-11-28 23:54:23 -08:00
func ( it * stopIterator ) Next ( ) chunkenc . ValueType {
2021-08-03 05:14:26 -07:00
if it . i + 1 >= it . stopAfter {
2021-11-28 23:54:23 -08:00
return chunkenc . ValNone
2021-08-03 05:14:26 -07:00
}
it . i ++
return it . Iterator . Next ( )
}
2022-10-05 13:14:49 -07:00
func makeStopIterator ( c chunkenc . Chunk , it chunkenc . Iterator , stopAfter int ) chunkenc . Iterator {
// Re-use the Iterator object if it is a stopIterator.
if stopIter , ok := it . ( * stopIterator ) ; ok {
stopIter . Iterator = c . Iterator ( stopIter . Iterator )
stopIter . i = - 1
stopIter . stopAfter = stopAfter
return stopIter
}
return & stopIterator {
Iterator : c . Iterator ( it ) ,
i : - 1 ,
stopAfter : stopAfter ,
}
}