// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tsdb

import (
	"context"
	"errors"
	"fmt"
	"math"
	"sync"

	"github.com/go-kit/log/level"
	"golang.org/x/exp/slices"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
	"github.com/prometheus/prometheus/tsdb/index"
)

// ExemplarQuerier returns a new ExemplarQuerier on the head's exemplar storage.
func (h *Head) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) {
	return h.exemplars.ExemplarQuerier(ctx)
}

// Index returns an IndexReader against the block.
func (h *Head) Index() (IndexReader, error) {
	return h.indexRange(math.MinInt64, math.MaxInt64), nil
}

// indexRange returns a headIndexReader for the given time range, clamping mint
// to the head's own minimum time.
func (h *Head) indexRange(mint, maxt int64) *headIndexReader {
	if hmin := h.MinTime(); hmin > mint {
		mint = hmin
	}
	return &headIndexReader{head: h, mint: mint, maxt: maxt}
}

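// exampleHeadPostingsLookup is an illustrative sketch, not part of the original
// code: it shows how a caller might obtain an IndexReader from a Head and
// expand the postings for one label/value pair. The function name, the label
// pair and the error handling are assumptions for demonstration only.
func exampleHeadPostingsLookup(ctx context.Context, h *Head) ([]storage.SeriesRef, error) {
	ir, err := h.Index()
	if err != nil {
		return nil, err
	}
	defer ir.Close()

	// Postings yields an iterator over the series references carrying the given
	// label value; ExpandPostings materializes it into a slice.
	p, err := ir.Postings(ctx, "job", "prometheus")
	if err != nil {
		return nil, err
	}
	return index.ExpandPostings(p)
}
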
type headIndexReader struct {
	head       *Head
	mint, maxt int64
}

// Close is a no-op: the reader holds no resources of its own.
func (h *headIndexReader) Close() error {
	return nil
}

// Symbols returns an iterator over the symbols (label names and values) known
// to the head's postings index.
func (h *headIndexReader) Symbols() index.StringIter {
	return h.head.postings.Symbols()
}

// SortedLabelValues returns label values present in the head for the
// specific label name that are within the time range mint to maxt.
// If matchers are specified the returned result set is reduced
// to label values of metrics matching the matchers.
func (h *headIndexReader) SortedLabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
	values, err := h.LabelValues(ctx, name, matchers...)
	if err == nil {
		slices.Sort(values)
	}
	return values, err
}

// LabelValues returns label values present in the head for the
// specific label name that are within the time range mint to maxt.
// If matchers are specified the returned result set is reduced
// to label values of metrics matching the matchers.
func (h *headIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
	if h.maxt < h.head.MinTime() || h.mint > h.head.MaxTime() {
		return []string{}, nil
	}

	if len(matchers) == 0 {
		return h.head.postings.LabelValues(ctx, name), nil
	}

	return labelValuesWithMatchers(ctx, h, name, matchers...)
}

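// exampleHeadLabelValues is an illustrative sketch, not part of the original
// code: it shows how LabelValues narrows its result set when matchers are
// supplied. The label names used here are assumptions for demonstration only.
func exampleHeadLabelValues(ctx context.Context, h *headIndexReader) ([]string, error) {
	// Without the matcher this would return every value of "instance" in the
	// head; with it, only values seen on series that also carry job="prometheus".
	m := labels.MustNewMatcher(labels.MatchEqual, "job", "prometheus")
	return h.LabelValues(ctx, "instance", m)
}
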
// LabelNames returns all the unique label names present in the head
// that are within the time range mint to maxt.
func (h *headIndexReader) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, error) {
	if h.maxt < h.head.MinTime() || h.mint > h.head.MaxTime() {
		return []string{}, nil
	}

	if len(matchers) == 0 {
		labelNames := h.head.postings.LabelNames()
		slices.Sort(labelNames)
		return labelNames, nil
	}

	return labelNamesWithMatchers(ctx, h, matchers...)
}

// Postings returns the postings list iterator for the label pairs.
func (h *headIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) {
	switch len(values) {
	case 0:
		return index.EmptyPostings(), nil
	case 1:
		return h.head.postings.Get(name, values[0]), nil
	default:
		res := make([]index.Postings, 0, len(values))
		for _, value := range values {
			if p := h.head.postings.Get(name, value); !index.IsEmptyPostingsType(p) {
				res = append(res, p)
			}
		}
		return index.Merge(ctx, res...), nil
	}
}

// SortedPostings returns a postings list sorted by the series' label sets.
func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings {
	series := make([]*memSeries, 0, 128)

	// Fetch all the series only once.
	for p.Next() {
		s := h.head.series.getByID(chunks.HeadSeriesRef(p.At()))
		if s == nil {
			level.Debug(h.head.logger).Log("msg", "Looked up series not found")
		} else {
			series = append(series, s)
		}
	}
	if err := p.Err(); err != nil {
		return index.ErrPostings(fmt.Errorf("expand postings: %w", err))
	}

	slices.SortFunc(series, func(a, b *memSeries) int {
		return labels.Compare(a.lset, b.lset)
	})

	// Convert back to list.
	ep := make([]storage.SeriesRef, 0, len(series))
	for _, p := range series {
		ep = append(ep, storage.SeriesRef(p.ref))
	}
	return index.NewListPostings(ep)
}

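// exampleSortedSeriesRefs is an illustrative sketch, not part of the original
// code: it shows the typical chain of Postings followed by SortedPostings,
// which yields series references ordered by their label sets instead of by
// series reference. The metric name used here is an assumption.
func exampleSortedSeriesRefs(ctx context.Context, h *headIndexReader) ([]storage.SeriesRef, error) {
	p, err := h.Postings(ctx, labels.MetricName, "up")
	if err != nil {
		return nil, err
	}
	return index.ExpandPostings(h.SortedPostings(p))
}
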
// ShardedPostings implements IndexReader. This function returns a failing postings list if sharding
// has not been enabled in the Head.
func (h *headIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCount uint64) index.Postings {
	if !h.head.opts.EnableSharding {
		return index.ErrPostings(errors.New("sharding is disabled"))
	}
	out := make([]storage.SeriesRef, 0, 128)
	for p.Next() {
		s := h.head.series.getByID(chunks.HeadSeriesRef(p.At()))
		if s == nil {
			level.Debug(h.head.logger).Log("msg", "Looked up series not found")
			continue
		}
		// Check if the series belongs to the shard.
		if s.shardHash%shardCount != shardIndex {
			continue
		}
		out = append(out, storage.SeriesRef(s.ref))
	}
	return index.NewListPostings(out)
}
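A hedged usage sketch (not part of this file; selectShard is a hypothetical helper): given an IndexReader from Head.Index() and an initial postings set, collect only the series references that fall into one shard.

// Sketch only: ir is assumed to come from Head.Index() on a Head with sharding
// enabled, and p from a matcher lookup. Returns the series refs of shard shardIndex.
func selectShard(ir IndexReader, p index.Postings, shardIndex, shardCount uint64) ([]storage.SeriesRef, error) {
	refs := make([]storage.SeriesRef, 0, 128)
	sharded := ir.ShardedPostings(p, shardIndex, shardCount)
	for sharded.Next() {
		refs = append(refs, sharded.At())
	}
	// Err is non-nil if iteration failed or sharding is disabled in the Head.
	return refs, sharded.Err()
}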
2021-08-03 05:14:26 -07:00
// Series returns the series for the given reference.
2024-02-02 10:38:50 -08:00
// Chunks are skipped if chks is nil.
2022-12-15 10:19:15 -08:00
func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error {
2021-11-06 03:10:04 -07:00
	s := h.head.series.getByID(chunks.HeadSeriesRef(ref))
2021-08-03 05:14:26 -07:00
	if s == nil {
		h.head.metrics.seriesNotFound.Inc()
		return storage.ErrNotFound
	}
2022-12-15 10:19:15 -08:00
	builder.Assign(s.lset)
2021-08-03 05:14:26 -07:00
remote write 2.0: sync with `main` branch (#13510)
* consoles: exclude iowait and steal from CPU Utilisation
'iowait' and 'steal' indicate specific idle/wait states, which shouldn't
be counted into CPU Utilisation. Also see
https://github.com/prometheus-operator/kube-prometheus/pull/796 and
https://github.com/kubernetes-monitoring/kubernetes-mixin/pull/667.
Per the iostat man page:
%idle
Show the percentage of time that the CPU or CPUs were idle and the
system did not have an outstanding disk I/O request.
%iowait
Show the percentage of time that the CPU or CPUs were idle during
which the system had an outstanding disk I/O request.
%steal
Show the percentage of time spent in involuntary wait by the
virtual CPU or CPUs while the hypervisor was servicing another
virtual processor.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
* tsdb: shrink txRing with smaller integers
4 billion active transactions ought to be enough for anyone.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* tsdb: create isolation transaction slice on demand
When Prometheus restarts it creates every series read in from the WAL,
but many of those series will be finished, and never receive any more
samples. By defering allocation of the txRing slice to when it is first
needed, we save 32 bytes per stale series.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* add cluster variable to Overview dashboard
Signed-off-by: Erik Sommer <ersotech@posteo.de>
* promql: simplify Native Histogram arithmetics
Signed-off-by: Linas Medziunas <linas.medziunas@gmail.com>
* Cut 2.49.0-rc.0 (#13270)
* Cut 2.49.0-rc.0
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Removed the duplicate.
Signed-off-by: bwplotka <bwplotka@gmail.com>
---------
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Add unit protobuf parser
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Go on adding protobuf parsing for unit
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* ui: create a reproduction for https://github.com/prometheus/prometheus/issues/13292
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Get conditional right
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Get VM Scale Set NIC (#13283)
Calling `*armnetwork.InterfacesClient.Get()` doesn't work for Scale Set
VM NIC, because these use a different Resource ID format.
Use `*armnetwork.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface()`
instead. This needs both the scale set name and the instance ID, so
add an `InstanceID` field to the `virtualMachine` struct. `InstanceID`
is empty for a VM that isn't a ScaleSetVM.
Signed-off-by: Daniel Nicholls <daniel.nicholls@resdiary.com>
* Cut v2.49.0-rc.1
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Delete debugging lines, amend error message for unit
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Correct order in error message
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Consider storage.ErrTooOldSample as non-retryable
Signed-off-by: Daniel Kerbel <nmdanny@gmail.com>
* scrape_test.go: Increase scrape interval in TestScrapeLoopCache to reduce potential flakiness
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Avoid creating string for suffix, consider counters without _total suffix
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* build(deps): bump github.com/prometheus/client_golang
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.17.0 to 1.18.0.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.17.0...v1.18.0)
---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] <support@github.com>
* build(deps): bump actions/setup-node from 3.8.1 to 4.0.1
Bumps [actions/setup-node](https://github.com/actions/setup-node) from 3.8.1 to 4.0.1.
- [Release notes](https://github.com/actions/setup-node/releases)
- [Commits](https://github.com/actions/setup-node/compare/5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d...b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8)
---
updated-dependencies:
- dependency-name: actions/setup-node
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
* scripts: sort file list in embed directive
Otherwise the resulting string depends on find, which afaict depends on
the underlying filesystem. A stable file list make it easier to detect
UI changes in downstreams that need to track UI assets.
Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
* Fix DataTableProps['data'] for resultType string
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
* Fix handling of scalar and string in isHeatmapData
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
* build(deps): bump github.com/influxdata/influxdb
Bumps [github.com/influxdata/influxdb](https://github.com/influxdata/influxdb) from 1.11.2 to 1.11.4.
- [Release notes](https://github.com/influxdata/influxdb/releases)
- [Commits](https://github.com/influxdata/influxdb/compare/v1.11.2...v1.11.4)
---
updated-dependencies:
- dependency-name: github.com/influxdata/influxdb
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] <support@github.com>
* build(deps): bump github.com/prometheus/prometheus
Bumps [github.com/prometheus/prometheus](https://github.com/prometheus/prometheus) from 0.48.0 to 0.48.1.
- [Release notes](https://github.com/prometheus/prometheus/releases)
- [Changelog](https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/prometheus/compare/v0.48.0...v0.48.1)
---
updated-dependencies:
- dependency-name: github.com/prometheus/prometheus
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] <support@github.com>
* Bump client_golang to v1.18.0 (#13373)
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Drop old inmemory samples (#13002)
* Drop old inmemory samples
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Avoid copying timeseries when the feature is disabled
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Run gofmt
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Clarify docs
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Add more logging info
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Remove loggers
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* optimize function and add tests
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Simplify filter
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* rename var
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Update help info from metrics
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* use metrics to keep track of drop elements during buildWriteRequest
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* rename var in tests
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* pass time.Now as parameter
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Change buildwriterequest during retries
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Revert "Remove loggers"
This reverts commit 54f91dfcae20488944162335ab4ad8be459df1ab.
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* use log level debug for loggers
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Fix linter
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Remove noisy debug-level logs; add 'reason' label to drop metrics
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Remove accidentally committed files
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Propagate logger to buildWriteRequest to log dropped data
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Fix docs comment
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Make drop reason more specific
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Remove unnecessary pass of logger
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Use snake_case for reason label
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Fix dropped samples metric
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
---------
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
* fix(discovery): allow requireUpdate util to timeout in discovery/file/file_test.go.
The loop ran indefinitely if the condition isn't met.
Before, each iteration created a new timer channel which was always outpaced by
the other timer channel with smaller duration.
minor detail: There was a memory leak: resources of the ~10 previous timers were
constantly kept. With the fix, we may keep the resources of one timer around for defaultWait
but this isn't worth the changes to make it right.
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Merge pull request #13371 from kevinmingtarja/fix-isHeatmapData
ui: fix handling of scalar and string in isHeatmapData
* tsdb/{index,compact}: allow using custom postings encoding format (#13242)
* tsdb/{index,compact}: allow using custom postings encoding format
We would like to experiment with a different postings encoding format in
Thanos so in this change I am proposing adding another argument to
`NewWriter` which would allow users to change the format if needed.
Also, wire the leveled compactor so that it would be possible to change
the format there too.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb/compact: use a struct for leveled compactor options
As discussed on Slack, let's use a struct for the options in leveled
compactor.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb: make changes after Bryan's review
- Make changes less intrusive
- Turn the postings encoder type into a function
- Add NewWriterWithEncoder()
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
---------
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Cut 2.49.0-rc.2
Signed-off-by: bwplotka <bwplotka@gmail.com>
* build(deps): bump actions/setup-go from 3.5.0 to 5.0.0 in /scripts (#13362)
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 3.5.0 to 5.0.0.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](https://github.com/actions/setup-go/compare/6edd4406fa81c3da01a34fa6f6343087c207a568...0c52d547c9bc32b1aa3301fd7a9cb496313a4491)
---
updated-dependencies:
- dependency-name: actions/setup-go
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump github/codeql-action from 2.22.8 to 3.22.12 (#13358)
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 2.22.8 to 3.22.12.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/github/codeql-action/compare/407ffafae6a767df3e0230c3df91b6443ae8df75...012739e5082ff0c22ca6d6ab32e07c36df03c4a4)
---
updated-dependencies:
- dependency-name: github/codeql-action
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* put @nexucis has a release shepherd (#13383)
Signed-off-by: Augustin Husson <augustin.husson@amadeus.com>
* Add analyze histograms command to promtool (#12331)
Add `query analyze` command to promtool
This command analyzes the buckets of classic and native histograms,
based on data queried from the Prometheus query API, i.e. it
doesn't require direct access to the TSDB files.
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* included instance in all necessary descriptions
Signed-off-by: Erik Sommer <ersotech@posteo.de>
* tsdb/compact: fix passing merge func
Fixing a very small logical problem I've introduced :(.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb: add enable overlapping compaction
This functionality is needed in downstream projects because they have a
separate component that does compaction.
Upstreaming
https://github.com/grafana/mimir-prometheus/blob/7c8e9a2a76fc729e9078889782928b2fdfe240e9/tsdb/compact.go#L323-L325.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Cut 2.49.0
Signed-off-by: bwplotka <bwplotka@gmail.com>
* promtool: allow setting multiple matchers to "promtool tsdb dump" command. (#13296)
Conditions are ANDed inside the same matcher but matchers are ORed
Including unit tests for "promtool tsdb dump".
Refactor some matchers scraping utils.
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Fixed changelog
Signed-off-by: bwplotka <bwplotka@gmail.com>
* tsdb/main: wire "EnableOverlappingCompaction" to tsdb.Options (#13398)
This added the https://github.com/prometheus/prometheus/pull/13393
"EnableOverlappingCompaction" parameter to the compactor code but not to
the tsdb.Options. I forgot about that. Add it to `tsdb.Options` too and
set it to `true` in Prometheus.
Copy/paste the description from
https://github.com/prometheus/prometheus/pull/13393#issuecomment-1891787986
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Issue #13268: fix quality value in accept header
Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
* Cut 2.49.1 with scrape q= bugfix.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Cut 2.49.1 web package.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Restore more efficient version of NewPossibleNonCounterInfo annotation (#13022)
Restore more efficient version of NewPossibleNonCounterInfo annotation
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* Fix regressions introduced by #13242
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* fix slice copy in 1.20 (#13389)
The slices package was added to the standard library in Go 1.21;
we need to import from the exp area to maintain compatibility with Go 1.20.
Signed-off-by: tyltr <tylitianrui@126.com>
* Docs: Query Basics: link to rate (#10538)
Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu>
* chore(kubernetes): check preconditions earlier and avoid unnecessary checks or iterations
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Examples: link to `rate` for new users (#10535)
* Examples: link to `rate` for new users
Signed-off-by: Ted Robertson <10043369+tredondo@users.noreply.github.com>
Co-authored-by: Bryan Boreham <bjboreham@gmail.com>
* promql: use natural sort in sort_by_label and sort_by_label_desc (#13411)
These functions are intended for humans, as robots can already sort the results
however they please. Humans like things sorted "naturally":
* https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/
A similar thing has been done to Grafana, which is also used by humans:
* https://github.com/grafana/grafana/pull/78024
* https://github.com/grafana/grafana/pull/78494
Signed-off-by: Ivan Babrou <github@ivan.computer>
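For example, given the label values node1, node10 and node2, natural ordering yields node1, node2, node10, whereas plain lexicographic ordering would yield node1, node10, node2.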
* TestLabelValuesWithMatchers: Add test case
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* remove obsolete build tag
Signed-off-by: tyltr <tylitianrui@126.com>
* Upgrade some golang dependencies for resty 2.11
Signed-off-by: Israel Blancas <iblancasa@gmail.com>
* Native Histograms: support `native_histogram_min_bucket_factor` in scrape_config (#13222)
Native Histograms: support native_histogram_min_bucket_factor in scrape_config
---------
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
* Add warnings for histogramRate applied with isCounter not matching counter/gauge histogram (#13392)
Add warnings for histogramRate applied with isCounter not matching counter/gauge histogram
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* Minor fixes to otlp vendor update script
Signed-off-by: Goutham <gouthamve@gmail.com>
* build(deps): bump github.com/hetznercloud/hcloud-go/v2
Bumps [github.com/hetznercloud/hcloud-go/v2](https://github.com/hetznercloud/hcloud-go) from 2.4.0 to 2.6.0.
- [Release notes](https://github.com/hetznercloud/hcloud-go/releases)
- [Changelog](https://github.com/hetznercloud/hcloud-go/blob/main/CHANGELOG.md)
- [Commits](https://github.com/hetznercloud/hcloud-go/compare/v2.4.0...v2.6.0)
---
updated-dependencies:
- dependency-name: github.com/hetznercloud/hcloud-go/v2
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] <support@github.com>
* Enhanced visibility for `promtool test rules` with JSON colored formatting (#13342)
* Added diff flag for unit test to improve readability & debugging
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Removed blank spaces
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Fixed linting error
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Added cli flags to documentation
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Revert unrelated linting fixes
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Fixed review suggestions
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Cleanup
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Updated flag description
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Updated flag description
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
---------
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* storage: skip merging when no remote storage configured
Prometheus is hard-coded to use a fanout storage between TSDB and
a remote storage which by default is empty.
This change detects the empty storage and skips merging between
result sets, which would make `Select()` sort results.
Bottom line: we skip a sort unless there really is some remote storage
configured.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* Remove csmarchbanks from remote write owners (#13432)
I have not had the time to keep up with remote write and have no plans
to work on it in the near future so I am withdrawing my maintainership
of that part of the codebase. I continue to focus on client_python.
Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
* add more context cancellation checks at evaluation time
Signed-off-by: Ben Ye <benye@amazon.com>
* Optimize label values with matchers by taking shortcuts (#13426)
Don't calculate postings beforehand: we may not need them. If all
matchers are for the requested label, we can just filter its values.
Also, if there are no values at all, no need to run any kind of
logic.
Also add more labelValuesWithMatchers benchmarks
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
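A rough sketch of the shortcut described above, written as self-contained toy code rather than the actual tsdb implementation; only the labels.Matcher helpers are taken from the real code base, everything else is illustrative.
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

// tryFilterDirectly returns (filtered, true) only when every matcher targets the
// requested label name; in that case the label's own values can be filtered
// directly and the postings lookup can be skipped entirely. Otherwise it reports
// false and the caller falls back to the regular postings-based path.
func tryFilterDirectly(name string, values []string, matchers []*labels.Matcher) ([]string, bool) {
	for _, m := range matchers {
		if m.Name != name {
			return nil, false
		}
	}
	var out []string
	for _, v := range values {
		keep := true
		for _, m := range matchers {
			if !m.Matches(v) {
				keep = false
				break
			}
		}
		if keep {
			out = append(out, v)
		}
	}
	return out, true
}

func main() {
	ms := []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "job", "api-.*")}
	vals, ok := tryFilterDirectly("job", []string{"api-1", "db-1", "api-2"}, ms)
	fmt.Println(vals, ok) // [api-1 api-2] true
}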
* Add automatic memory limit handling
Enable automatic detection of memory limits and configure GOMEMLIMIT to
match.
* Also includes a flag to allow controlling the reserved ratio.
Signed-off-by: SuperQ <superq@gmail.com>
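A minimal sketch of the mechanism, using only the standard library; the flag name and the way Prometheus detects the limit are not shown, and applyMemLimit below is illustrative rather than the actual implementation.
package main

import (
	"fmt"
	"runtime/debug"
)

// applyMemLimit hands most of a detected memory limit (for example the cgroup
// limit of the container) to the Go runtime, keeping a reserved ratio for
// non-heap usage. Setting this at runtime is equivalent to exporting GOMEMLIMIT.
func applyMemLimit(limitBytes int64, reservedRatio float64) int64 {
	goLimit := int64(float64(limitBytes) * (1 - reservedRatio))
	debug.SetMemoryLimit(goLimit)
	return goLimit
}

func main() {
	// Pretend the container limit is 2 GiB and reserve 10% of it.
	fmt.Println(applyMemLimit(2<<30, 0.1))
}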
* Update OSSF badge link (#13433)
Provide a more user-friendly interface
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
* SD Managers taking over responsibility for registration of debug metrics (#13375)
SD Managers take over responsibility for SD metrics registration
---------
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
* Optimize histogram iterators (#13340)
Optimize histogram iterators
Histogram iterators allocate new objects in the AtHistogram and
AtFloatHistogram methods, which makes calculating rates over long
ranges expensive.
In #13215 we allowed an existing object to be reused
when converting an integer histogram to a float histogram. This commit follows
the same idea and allows injecting an existing object in the AtHistogram and
AtFloatHistogram methods. When the injected value is nil, iterators allocate
new histograms, otherwise they populate and return the injected object.
The commit also adds a CopyTo method to Histogram and FloatHistogram which
is used in the BufferedIterator to overwrite items in the ring instead of making
new copies.
Note that a specialized HPoint pool is needed for all of this to work
(`matrixSelectorHPool`).
---------
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
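The reuse pattern reads roughly like the toy sketch below; it is deliberately self-contained and does not use the real chunkenc iterator or histogram types, whose exact signatures are not quoted here.
package main

import "fmt"

// hist stands in for histogram.Histogram / histogram.FloatHistogram.
type hist struct{ count uint64 }

// iter stands in for a chunk iterator over histogram samples.
type iter struct {
	samples []uint64
	i       int
}

func (it *iter) Next() bool { it.i++; return it.i <= len(it.samples) }

// At mirrors the AtHistogram/AtFloatHistogram idea from the commit above: if the
// caller passes an existing object it is populated in place, otherwise a new one
// is allocated. A long rate() evaluation can therefore reuse a single object.
func (it *iter) At(h *hist) *hist {
	if h == nil {
		h = &hist{}
	}
	h.count = it.samples[it.i-1]
	return h
}

func main() {
	it := &iter{samples: []uint64{1, 2, 3}}
	var reused *hist
	for it.Next() {
		reused = it.At(reused) // allocates only on the first iteration
		fmt.Println(reused.count)
	}
}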
* doc: Mark `mad_over_time` as experimental (#13440)
We forgot to do that in
https://github.com/prometheus/prometheus/pull/13059
Signed-off-by: beorn7 <beorn@grafana.com>
* Change metric label for Puppetdb from 'http' to 'puppetdb'
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
* mirror metrics.proto change & generate code
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
* TestHeadLabelValuesWithMatchers: Add test case (#13414)
Add test case to TestHeadLabelValuesWithMatchers, while fixing a couple
of typos in other test cases. Also enclosing some implicit sub-tests in a
`t.Run` call to make them explicit sub-tests.
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* update all go dependencies (#13438)
Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
* build(deps): bump the k8s-io group with 2 updates (#13454)
Bumps the k8s-io group with 2 updates: [k8s.io/api](https://github.com/kubernetes/api) and [k8s.io/client-go](https://github.com/kubernetes/client-go).
Updates `k8s.io/api` from 0.28.4 to 0.29.1
- [Commits](https://github.com/kubernetes/api/compare/v0.28.4...v0.29.1)
Updates `k8s.io/client-go` from 0.28.4 to 0.29.1
- [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md)
- [Commits](https://github.com/kubernetes/client-go/compare/v0.28.4...v0.29.1)
---
updated-dependencies:
- dependency-name: k8s.io/api
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: k8s-io
- dependency-name: k8s.io/client-go
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: k8s-io
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump the go-opentelemetry-io group with 1 update (#13453)
Bumps the go-opentelemetry-io group with 1 update: [go.opentelemetry.io/collector/semconv](https://github.com/open-telemetry/opentelemetry-collector).
Updates `go.opentelemetry.io/collector/semconv` from 0.92.0 to 0.93.0
- [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases)
- [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md)
- [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.92.0...v0.93.0)
---
updated-dependencies:
- dependency-name: go.opentelemetry.io/collector/semconv
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: go-opentelemetry-io
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump actions/upload-artifact from 3.1.3 to 4.0.0 (#13355)
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 3.1.3 to 4.0.0.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/a8a3f3ad30e3422c9c7b888a15615d19a852ae32...c7d193f32edcb7bfad88892161225aeda64e9392)
---
updated-dependencies:
- dependency-name: actions/upload-artifact
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump bufbuild/buf-push-action (#13357)
Bumps [bufbuild/buf-push-action](https://github.com/bufbuild/buf-push-action) from 342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1 to a654ff18effe4641ebea4a4ce242c49800728459.
- [Release notes](https://github.com/bufbuild/buf-push-action/releases)
- [Commits](https://github.com/bufbuild/buf-push-action/compare/342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1...a654ff18effe4641ebea4a4ce242c49800728459)
---
updated-dependencies:
- dependency-name: bufbuild/buf-push-action
dependency-type: direct:production
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* Labels: Add DropMetricName function, used in PromQL (#13446)
This function is called very frequently when executing PromQL functions,
and we can do it much more efficiently inside Labels.
In the common case that `__name__` comes first in the labels, we simply
re-point to start at the next label, which is nearly free.
`DropMetricName` is now so cheap I removed the cache - benchmarks show
everything still goes faster.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
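Assuming the DropMetricName method added by this commit, usage looks roughly like the sketch below; the label values are made up for the example.
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	lset := labels.FromStrings("__name__", "http_requests_total", "job", "api")
	// DropMetricName strips __name__; when it is the first label in the set this
	// is close to free, since the result can simply re-point past it.
	fmt.Println(lset.DropMetricName())
}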
* tsdb: simplify internal series delete function (#13261)
Lifting an optimisation from Agent code, `seriesHashmap.del` can use
the unique series reference, doesn't need to check Labels.
Also streamline the logic for deleting from `unique` and `conflicts` maps,
and add some comments to help the next person.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* otlptranslator/update-copy.sh: Fix sed command lines
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* Rollback k8s.io requirements (#13462)
Rollback k8s.io Go modules to v0.28.6 to avoid forcing upgrade of Go to
1.21. This allows us to keep compatibility with the currently supported
upstream Go releases.
Signed-off-by: SuperQ <superq@gmail.com>
* Make update-copy.sh work for both OSX and GNU sed
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* Name @beorn7 and @krajorama as maintainers for native histograms
I have been the de-facto maintainer for native histograms from the
beginning. So let's put this into MAINTAINERS.md.
In addition, I hereby propose George Krajcsovits AKA Krajo as a
co-maintainer. He has contributed a lot of native histogram code, but
more importantly, he has contributed substantially to reviewing other
contributors' native histogram code, up to a point where I was merely
rubberstamping the PRs he had already reviewed. I'm confident that he
is ready to be granted commit rights as outlined in the
"Maintainers" section of the governance:
https://prometheus.io/governance/#maintainers
According to the same section of the governance, I will announce the
proposed change on the developers mailing list and will give some time
for lazy consensus before merging this PR.
Signed-off-by: beorn7 <beorn@grafana.com>
* ui/fix: correct url handling for stacked graphs (#13460)
Signed-off-by: Yury Moladau <yurymolodov@gmail.com>
* tsdb: use cheaper Mutex on series
Mutex is 8 bytes; RWMutex is 24 bytes and much more complicated. Since
`RLock` is only used in two places, `UpdateMetadata` and `Delete`,
neither of which are hotspots, we should use the cheaper one.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* Fix last_over_time for native histograms
The last_over_time function retains a histogram sample without making a copy.
This sample is now coming from the buffered iterator used for windowing functions,
and can be reused for reading subsequent samples as the iterator progresses.
I would propose copying the sample in the last_over_time function, similar to
how it is done for rate, sum_over_time and others.
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
* Implementation
NOTE:
Rebased from main after refactor in #13014
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Add feature flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactor concurrency control
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Optimising dependencies/dependents funcs to not produce new slices each request
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactoring
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Rename flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactoring for performance, and to allow controller to be overridden
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Block until all rules, both sync & async, have completed evaluating
Updated & added tests
Review feedback nits
Return empty map if not indeterminate
Use highWatermark to track inflight requests counter
Appease the linter
Clarify feature flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Fix typo in CLI flag description
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Fixed auto-generated doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Improve doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Simplify the design to update the concurrency controller once the rule evaluation is done
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Add more test cases to TestDependenciesEdgeCases
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Added more test cases to TestDependenciesEdgeCases
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Improved RuleConcurrencyController interface doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Introduced sequentialRuleEvalController
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Remove superfluous nil check in Group.metrics
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* api: Serialize discovered and target labels into JSON directly (#13469)
Converted maps into labels.Labels to avoid a lot of copying of data, which leads to very high memory consumption while opening the /service-discovery endpoint in the Prometheus UI
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
* api: Serialize discovered labels into JSON directly in dropped targets (#13484)
Converted maps into labels.Labels to avoid a lot of copying of data, which leads to very high memory consumption while opening the /service-discovery endpoint in the Prometheus UI
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
* Add ShardedPostings() support to TSDB (#10421)
This PR is a reference implementation of the proposal described in #10420.
In addition to what is described in #10420, in this PR I've introduced labels.StableHash(). The idea is to offer a hashing function which doesn't change over time, and that's used by query sharding in order to get a stable behaviour over time. The implementation of labels.StableHash() is the hashing function used by Prometheus before stringlabels, and what's used by Grafana Mimir for query sharding (because it was built before stringlabels was a thing).
Follow up work
As mentioned in #10420, if this PR is accepted I'm also open to uploading another fundamental piece used by Grafana Mimir query sharding to accelerate the query execution: an optional, configurable and fast in-memory cache for the series hashes.
Signed-off-by: Marco Pracucci <marco@pracucci.com>
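The shard-selection rule that ShardedPostings() is meant to support can be sketched as follows; labels.StableHash() is the function introduced by this PR, while inShard() and the example shard parameters are purely illustrative.
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

// inShard reports whether a series belongs to shard shardIndex out of shardCount,
// using a hash of its labels that stays stable across Prometheus versions.
func inShard(lset labels.Labels, shardIndex, shardCount uint64) bool {
	return labels.StableHash(lset)%shardCount == shardIndex
}

func main() {
	lset := labels.FromStrings("__name__", "up", "job", "api")
	fmt.Println(inShard(lset, 0, 4), inShard(lset, 1, 4))
}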
* storage/remote: document why two benchmarks are skipped
One was silently doing nothing; one was doing something but the work
didn't go up linearly with iteration count.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* Pod status changes not discovered by Kube Endpoints SD (#13337)
* fix(discovery/kubernetes/endpoints): react to changes on Pods because some modifications can occur on them without triggering an update on the related Endpoints (e.g. the Pod phase changing from Pending to Running).
---------
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com>
* Small improvements, add const, remove copypasta (#8106)
Signed-off-by: Mikhail Fesenko <proggga@gmail.com>
Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com>
* Proposal to improve FPointSlice and HPointSlice allocation. (#13448)
* Reusing points slice from previous series when the slice is underutilized
* Adding comments on the bench test
Signed-off-by: Alan Protasio <alanprot@gmail.com>
* lint
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* go mod tidy
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
---------
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
Signed-off-by: Erik Sommer <ersotech@posteo.de>
Signed-off-by: Linas Medziunas <linas.medziunas@gmail.com>
Signed-off-by: bwplotka <bwplotka@gmail.com>
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
Signed-off-by: Daniel Nicholls <daniel.nicholls@resdiary.com>
Signed-off-by: Daniel Kerbel <nmdanny@gmail.com>
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Signed-off-by: Augustin Husson <augustin.husson@amadeus.com>
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Signed-off-by: Marco Pracucci <marco@pracucci.com>
Signed-off-by: tyltr <tylitianrui@126.com>
Signed-off-by: Ted Robertson <10043369+tredondo@users.noreply.github.com>
Signed-off-by: Ivan Babrou <github@ivan.computer>
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
Signed-off-by: Israel Blancas <iblancasa@gmail.com>
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Signed-off-by: Goutham <gouthamve@gmail.com>
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
Signed-off-by: Ben Ye <benye@amazon.com>
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
Signed-off-by: SuperQ <superq@gmail.com>
Signed-off-by: Ben Kochie <superq@gmail.com>
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
Signed-off-by: beorn7 <beorn@grafana.com>
Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
Signed-off-by: Yury Moladau <yurymolodov@gmail.com>
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Signed-off-by: Mikhail Fesenko <proggga@gmail.com>
Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com>
Signed-off-by: Alan Protasio <alanprot@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
Co-authored-by: Julian Wiedmann <jwi@linux.ibm.com>
Co-authored-by: Bryan Boreham <bjboreham@gmail.com>
Co-authored-by: Erik Sommer <ersotech@posteo.de>
Co-authored-by: Linas Medziunas <linas.medziunas@gmail.com>
Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
Co-authored-by: Arianna Vespri <arianna.vespri@yahoo.it>
Co-authored-by: machine424 <ayoubmrini424@gmail.com>
Co-authored-by: daniel-resdiary <109083091+daniel-resdiary@users.noreply.github.com>
Co-authored-by: Daniel Kerbel <nmdanny@gmail.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Jan Fajerski <jfajersk@redhat.com>
Co-authored-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Co-authored-by: Marc Tudurí <marctc@protonmail.com>
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Co-authored-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Co-authored-by: Augustin Husson <husson.augustin@gmail.com>
Co-authored-by: Björn Rabenstein <beorn@grafana.com>
Co-authored-by: zenador <zenador@users.noreply.github.com>
Co-authored-by: gotjosh <josue.abreu@gmail.com>
Co-authored-by: Ben Kochie <superq@gmail.com>
Co-authored-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Co-authored-by: Marco Pracucci <marco@pracucci.com>
Co-authored-by: tyltr <tylitianrui@126.com>
Co-authored-by: Ted Robertson <10043369+tredondo@users.noreply.github.com>
Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu>
Co-authored-by: Matthias Loibl <mail@matthiasloibl.com>
Co-authored-by: Ivan Babrou <github@ivan.computer>
Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com>
Co-authored-by: Israel Blancas <iblancasa@gmail.com>
Co-authored-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: Goutham <gouthamve@gmail.com>
Co-authored-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Co-authored-by: Chris Marchbanks <csmarchbanks@gmail.com>
Co-authored-by: Ben Ye <benye@amazon.com>
Co-authored-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Co-authored-by: Paulin Todev <paulin.todev@gmail.com>
Co-authored-by: Filip Petkovski <filip.petkovsky@gmail.com>
Co-authored-by: Yury Molodov <yurymolodov@gmail.com>
Co-authored-by: Danny Kopping <danny.kopping@grafana.com>
Co-authored-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com>
Co-authored-by: Mikhail Fesenko <proggga@gmail.com>
Co-authored-by: Alan Protasio <alanprot@gmail.com>
2024-02-02 10:38:50 -08:00
if chks == nil {
return nil
}
2021-08-03 05:14:26 -07:00
s . Lock ( )
defer s . Unlock ( )
* chks = ( * chks ) [ : 0 ]
for i , c := range s . mmappedChunks {
// Do not expose chunks that are outside of the specified range.
if ! c . OverlapsClosedInterval ( h . mint , h . maxt ) {
continue
}
* chks = append ( * chks , chunks . Meta {
MinTime : c . minTime ,
MaxTime : c . maxTime ,
2021-11-17 05:05:10 -08:00
Ref : chunks . ChunkRef ( chunks . NewHeadChunkRef ( s . ref , s . headChunkID ( i ) ) ) ,
2021-08-03 05:14:26 -07:00
} )
}
Use a linked list for memSeries.headChunk (#11818)
Currently memSeries holds a single in-memory head chunk and a slice of mmapped chunks.
When append() is called on memSeries it might decide that a new headChunk is needed for the given append() call.
If that happens it will first mmap the existing head chunk, and only after that will it create a new empty headChunk and continue appending
the sample to it.
Since appending samples takes a write lock on memSeries, no other read or write can happen until the append is completed.
When an append() must create a new head chunk, the whole memSeries is blocked until mmapping of the existing head chunk finishes.
Mmapping itself uses a lock because it needs to be serialised, which means that the more chunks there are to mmap, the longer each chunk might wait
to be mmapped.
If there are enough chunks that require mmapping, some memSeries will be locked long enough to start affecting
queries and scrapes.
Queries might time out, since by default they have a 2-minute timeout set.
Scrapes will be blocked inside the append() call, which means there will be a gap between samples. This will first affect range queries
or calls using rate() and such, since the time range requested in the query might have too few samples to calculate anything.
To avoid this we need to remove mmapping from the append path, since mmapping is blocking.
But this means that when we cut a new head chunk we need to keep the old one around, so we can mmap it later.
This change makes memSeries.headChunk a linked list: memSeries.headChunk still points to the 'open' head chunk that receives new samples,
while older, yet-to-be-mmapped chunks are linked to it.
Mmapping is done on a schedule by iterating all memSeries one by one. Thanks to this we control when mmapping is done, since we trigger
it manually, which reduces the risk that it will have to compete for mmap locks with other chunks.
Signed-off-by: Łukasz Mierzwa <l.mierzwa@gmail.com>
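As a rough illustration of the data structure (the prev field and len() mirror what the code below uses, but the sketch itself is simplified and not the real memChunk type):
package main

import "fmt"

// memChunkNode is an illustrative stand-in for the head-chunk list node: the open
// head chunk sits at the front and prev points at the next-older chunk that is
// still waiting to be mmapped.
type memChunkNode struct {
	prev    *memChunkNode
	minTime int64
	maxTime int64
}

// len counts the chunks currently held on the list.
func (c *memChunkNode) len() int {
	n := 0
	for cur := c; cur != nil; cur = cur.prev {
		n++
	}
	return n
}

func main() {
	oldest := &memChunkNode{minTime: 0, maxTime: 99}
	open := &memChunkNode{prev: oldest, minTime: 100, maxTime: 199}
	fmt.Println(open.len()) // 2
}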
2023-07-31 02:10:24 -07:00
if s . headChunks != nil {
var maxTime int64
var i , j int
for i = s . headChunks . len ( ) - 1 ; i >= 0 ; i -- {
chk := s . headChunks . atOffset ( i )
if i == 0 {
// Set the head chunk as open (being appended to) for the first headChunk.
maxTime = math . MaxInt64
} else {
maxTime = chk . maxTime
}
if chk . OverlapsClosedInterval ( h . mint , h . maxt ) {
* chks = append ( * chks , chunks . Meta {
MinTime : chk . minTime ,
MaxTime : maxTime ,
Ref : chunks . ChunkRef ( chunks . NewHeadChunkRef ( s . ref , s . headChunkID ( len ( s . mmappedChunks ) + j ) ) ) ,
} )
}
j ++
}
2021-08-03 05:14:26 -07:00
}
return nil
}
2022-09-20 10:05:50 -07:00
// headChunkID returns the HeadChunkID referred to by the given position.
// * 0 <= pos < len(s.mmappedChunks) refer to s.mmappedChunks[pos]
2023-10-03 13:09:25 -07:00
// * pos >= len(s.mmappedChunks) refers to s.headChunks linked list.
2021-11-17 05:05:10 -08:00
func ( s * memSeries ) headChunkID ( pos int ) chunks . HeadChunkID {
return chunks . HeadChunkID ( pos ) + s . firstChunkID
2021-08-03 05:14:26 -07:00
}
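// Illustration (not part of the original code): with firstChunkID == 5 and
// len(s.mmappedChunks) == 3, HeadChunkIDs 5, 6 and 7 map to s.mmappedChunks[0..2],
// while any higher ID refers to a chunk held on the s.headChunks linked list.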
2022-09-20 10:05:50 -07:00
// oooHeadChunkID returns the HeadChunkID referred to by the given position.
// * 0 <= pos < len(s.oooMmappedChunks) refer to s.oooMmappedChunks[pos]
// * pos == len(s.oooMmappedChunks) refers to s.oooHeadChunk
2022-12-28 02:19:41 -08:00
// The caller must ensure that s.ooo is not nil.
2022-09-20 10:05:50 -07:00
func ( s * memSeries ) oooHeadChunkID ( pos int ) chunks . HeadChunkID {
2022-12-28 02:19:41 -08:00
return chunks . HeadChunkID ( pos ) + s . ooo . firstOOOChunkID
2022-09-20 10:05:50 -07:00
}
2021-08-03 05:14:26 -07:00
// LabelValueFor returns the label value for the given label name in the series referred to by ID.
2023-09-14 07:02:04 -07:00
func ( h * headIndexReader ) LabelValueFor ( _ context . Context , id storage . SeriesRef , label string ) ( string , error ) {
2021-11-06 03:10:04 -07:00
memSeries := h . head . series . getByID ( chunks . HeadSeriesRef ( id ) )
2021-08-03 05:14:26 -07:00
if memSeries == nil {
return "" , storage . ErrNotFound
}
value := memSeries . lset . Get ( label )
if value == "" {
return "" , storage . ErrNotFound
}
return value , nil
}
// LabelNamesFor returns all the label names for the series referred to by IDs.
// The names returned are sorted.
2023-09-14 01:39:51 -07:00
func ( h * headIndexReader ) LabelNamesFor ( ctx context . Context , ids ... storage . SeriesRef ) ( [ ] string , error ) {
2021-08-03 05:14:26 -07:00
namesMap := make ( map [ string ] struct { } )
for _ , id := range ids {
2023-09-14 01:39:51 -07:00
if ctx . Err ( ) != nil {
return nil , ctx . Err ( )
}
2021-11-06 03:10:04 -07:00
memSeries := h . head . series . getByID ( chunks . HeadSeriesRef ( id ) )
2021-08-03 05:14:26 -07:00
if memSeries == nil {
return nil , storage . ErrNotFound
}
2022-03-09 14:17:40 -08:00
memSeries . lset . Range ( func ( lbl labels . Label ) {
2021-08-03 05:14:26 -07:00
namesMap [ lbl . Name ] = struct { } { }
2022-03-09 14:17:40 -08:00
} )
2021-08-03 05:14:26 -07:00
}
names := make ( [ ] string , 0 , len ( namesMap ) )
for name := range namesMap {
names = append ( names , name )
}
2022-09-30 07:33:56 -07:00
slices . Sort ( names )
2021-08-03 05:14:26 -07:00
return names , nil
}
// Chunks returns a ChunkReader against the block.
func ( h * Head ) Chunks ( ) ( ChunkReader , error ) {
return h . chunksRange ( math . MinInt64 , math . MaxInt64 , h . iso . State ( math . MinInt64 , math . MaxInt64 ) )
}
func ( h * Head ) chunksRange ( mint , maxt int64 , is * isolationState ) ( * headChunkReader , error ) {
h . closedMtx . Lock ( )
defer h . closedMtx . Unlock ( )
if h . closed {
return nil , errors . New ( "can't read from a closed head" )
}
if hmin := h . MinTime ( ) ; hmin > mint {
mint = hmin
}
return & headChunkReader {
head : h ,
mint : mint ,
maxt : maxt ,
isoState : is ,
} , nil
}
type headChunkReader struct {
head * Head
mint , maxt int64
isoState * isolationState
}
func ( h * headChunkReader ) Close ( ) error {
2022-09-27 07:01:23 -07:00
if h . isoState != nil {
h . isoState . Close ( )
}
2021-08-03 05:14:26 -07:00
return nil
}
2023-11-28 02:14:29 -08:00
// ChunkOrIterable returns the chunk for the reference number.
func ( h * headChunkReader ) ChunkOrIterable ( meta chunks . Meta ) ( chunkenc . Chunk , chunkenc . Iterable , error ) {
2023-02-19 09:34:51 -08:00
chk , _ , err := h . chunk ( meta , false )
2023-11-28 02:14:29 -08:00
return chk , nil , err
2023-02-19 09:34:51 -08:00
}
// ChunkWithCopy returns the chunk for the reference number.
// If the chunk is the in-memory chunk, then it makes a copy and returns the copied chunk.
func ( h * headChunkReader ) ChunkWithCopy ( meta chunks . Meta ) ( chunkenc . Chunk , int64 , error ) {
return h . chunk ( meta , true )
}
// chunk returns the chunk for the reference number.
// If copyLastChunk is true and the requested chunk is the open head chunk, it returns a copy of that chunk.
// It also returns the max time of the chunk.
func ( h * headChunkReader ) chunk ( meta chunks . Meta , copyLastChunk bool ) ( chunkenc . Chunk , int64 , error ) {
2022-09-20 10:05:50 -07:00
sid , cid := chunks . HeadChunkRef ( meta . Ref ) . Unpack ( )
2021-08-03 05:14:26 -07:00
s := h . head . series . getByID ( sid )
// This means that the series has been garbage collected.
if s == nil {
2023-02-19 09:34:51 -08:00
return nil , 0 , storage . ErrNotFound
2021-08-03 05:14:26 -07:00
}
s . Lock ( )
Use a linked list for memSeries.headChunk (#11818)
2023-07-31 02:10:24 -07:00
c , headChunk , isOpen , err := s . chunk ( cid , h . head . chunkDiskMapper , & h . head . memChunkPool )
2021-08-03 05:14:26 -07:00
if err != nil {
s . Unlock ( )
2023-02-19 09:34:51 -08:00
return nil , 0 , err
2021-08-03 05:14:26 -07:00
}
defer func ( ) {
2023-02-19 09:34:51 -08:00
if ! headChunk {
2021-08-03 05:14:26 -07:00
// Set this to nil so that Go GC can collect it after it has been used.
c . chunk = nil
Use a linked list for memSeries.headChunk (#11818)
2023-07-31 02:10:24 -07:00
c . prev = nil
2022-09-15 00:52:09 -07:00
h . head . memChunkPool . Put ( c )
2021-08-03 05:14:26 -07:00
}
} ( )
// This means that the chunk is outside the specified range.
if ! c . OverlapsClosedInterval ( h . mint , h . maxt ) {
s . Unlock ( )
2023-02-19 09:34:51 -08:00
return nil , 0 , storage . ErrNotFound
}
chk , maxTime := c . chunk , c . maxTime
Use a linked list for memSeries.headChunk (#11818)
2023-07-31 02:10:24 -07:00
if headChunk && isOpen && copyLastChunk {
2023-02-19 09:34:51 -08:00
// The caller may ask to copy the head chunk in order to take the
// bytes of the chunk without causing a race between read and append.
Use a linked list for memSeries.headChunk (#11818)
2023-07-31 02:10:24 -07:00
b := s . headChunks . chunk . Bytes ( )
2023-02-19 09:34:51 -08:00
newB := make ( [ ] byte , len ( b ) )
copy ( newB , b ) // TODO(codesome): Use bytes.Clone() when we upgrade to Go 1.20.
// TODO(codesome): Put back in the pool (non-trivial).
Use a linked list for memSeries.headChunk (#11818)
2023-07-31 02:10:24 -07:00
chk , err = h . head . opts . ChunkPool . Get ( s . headChunks . chunk . Encoding ( ) , newB )
2023-02-19 09:34:51 -08:00
if err != nil {
return nil , 0 , err
}
2021-08-03 05:14:26 -07:00
}
s . Unlock ( )
2023-05-19 01:24:06 -07:00
return & safeHeadChunk {
2023-02-19 09:34:51 -08:00
Chunk : chk ,
2023-02-21 01:30:11 -08:00
s : s ,
cid : cid ,
isoState : h . isoState ,
2023-02-19 09:34:51 -08:00
} , maxTime , nil
2021-08-03 05:14:26 -07:00
}
2021-11-17 05:05:10 -08:00
// chunk returns the chunk for the HeadChunkID from memory or by m-mapping it from the disk.
2023-03-21 02:45:36 -07:00
// If headChunk is false, it means that the returned *memChunk
2021-11-17 02:21:27 -08:00
// (and not the chunkenc.Chunk inside it) can be garbage collected after its usage.
Use a linked list for memSeries.headChunk (#11818)
2023-07-31 02:10:24 -07:00
// If isOpen is true, it means that the returned *memChunk is used for appends.
func ( s * memSeries ) chunk ( id chunks . HeadChunkID , chunkDiskMapper * chunks . ChunkDiskMapper , memChunkPool * sync . Pool ) ( chunk * memChunk , headChunk , isOpen bool , err error ) {
2021-08-03 05:14:26 -07:00
// ix represents the index of the chunk in the s.mmappedChunks slice. The chunk IDs are
// incremented by 1 when a new chunk is created, hence (id - firstChunkID) gives the slice index.
// The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix
Use a linked list for memSeries.headChunk (#11818)
2023-07-31 02:10:24 -07:00
// is >= len(s.mmappedChunks), it represents one of the chunks on the s.headChunks linked list.
// The order of elements is different for the slice and the linked list.
// For the s.mmappedChunks slice, newer chunks are appended to it.
// For the s.headChunks list, newer chunks are prepended to it.
//
// memSeries {
// mmappedChunks: [t0, t1, t2]
// headChunk: {t5}->{t4}->{t3}
// }
2021-11-17 05:05:10 -08:00
ix := int ( id ) - int ( s . firstChunkID )
Use a linked list for memSeries.headChunk (#11818)
2023-07-31 02:10:24 -07:00
var headChunksLen int
if s . headChunks != nil {
headChunksLen = s . headChunks . len ( )
2021-08-03 05:14:26 -07:00
}
2023-02-19 09:34:51 -08:00
Use a linked list for memSeries.headChunk (#11818)
2023-07-31 02:10:24 -07:00
if ix < 0 || ix > len ( s . mmappedChunks ) + headChunksLen - 1 {
return nil , false , false , storage . ErrNotFound
2021-08-03 05:14:26 -07:00
}
Use a linked list for memSeries.headChunk (#11818)
2023-07-31 02:10:24 -07:00
if ix < len(s.mmappedChunks) {
chk, err := chunkDiskMapper.Chunk(s.mmappedChunks[ix].ref)
if err != nil {
2023-11-16 10:54:41 -08:00
var cerr *chunks.CorruptionErr
if errors.As(err, &cerr) {
2023-07-31 02:10:24 -07:00
panic(err)
}
return nil, false, false, err
2021-08-03 05:14:26 -07:00
}
2023-07-31 02:10:24 -07:00
mc := memChunkPool.Get().(*memChunk)
mc.chunk = chk
mc.minTime = s.mmappedChunks[ix].minTime
mc.maxTime = s.mmappedChunks[ix].maxTime
return mc, false, false, nil
2021-08-03 05:14:26 -07:00
}
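The memChunkPool.Get() call above follows the standard sync.Pool reuse idiom; a small, self-contained sketch of that pattern (with a simplified stand-in for memChunk) looks roughly like this:

package sketch

import (
	"sync"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// memChunk here is a simplified stand-in for the real type.
type memChunk struct {
	chunk            chunkenc.Chunk
	minTime, maxTime int64
}

var memChunkPool = sync.Pool{
	New: func() interface{} { return &memChunk{} },
}

func checkout(chk chunkenc.Chunk, mint, maxt int64) *memChunk {
	mc := memChunkPool.Get().(*memChunk) // reuse a previously released object if available
	mc.chunk = chk
	mc.minTime = mint
	mc.maxTime = maxt
	return mc
}

func release(mc *memChunk) {
	mc.chunk = nil // drop references before putting the object back
	memChunkPool.Put(mc)
}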
2023-07-31 02:10:24 -07:00
ix -= len(s.mmappedChunks)
offset := headChunksLen - ix - 1
// headChunks is a linked list where the first element is the most recent one and the last one is the oldest.
// This order is reversed compared with mmappedChunks, since mmappedChunks[0] is the oldest chunk,
// while headChunks.atOffset(0) would give us the most recent chunk.
// So when calling headChunks.atOffset() we need to reverse the value of ix.
elem := s.headChunks.atOffset(offset)
if elem == nil {
// This should never really happen and would mean that the headChunksLen value is NOT equal
// to the length of the headChunks list.
return nil, false, false, storage.ErrNotFound
}
return elem, true, offset == 0, nil
2021-08-03 05:14:26 -07:00
}
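As a worked example of the index reversal above, here is a hypothetical helper (not part of the file) that maps a flat chunk index onto the reversed head-chunk list:

// Illustrative only: numMmapped is len(s.mmappedChunks), headChunksLen the list length.
func headChunkOffset(numMmapped, headChunksLen, ix int) (int, bool) {
	ix -= numMmapped // indexes past the m-mapped chunks refer to head chunks
	offset := headChunksLen - ix - 1
	if offset < 0 || offset >= headChunksLen {
		return 0, false
	}
	return offset, true
}

// For example, with numMmapped = 3 and headChunksLen = 2:
//   ix = 3 (oldest head chunk)  -> offset 1
//   ix = 4 (newest, open chunk) -> offset 0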
2023-11-28 02:14:29 -08:00
// oooMergedChunks returns an iterable over one or more OOO chunks for the given
// chunks.Meta reference from memory or by m-mapping it from the disk. The
// returned iterable will be a merge of all the overlapping chunks, if any,
// amongst all the chunks in the OOOHead.
2022-09-20 10:05:50 -07:00
// This function is not thread safe unless the caller holds a lock.
2022-12-28 02:19:41 -08:00
// The caller must ensure that s.ooo is not nil.
2023-11-28 02:14:29 -08:00
func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, mint, maxt int64) (*mergedOOOChunks, error) {
2022-09-20 10:05:50 -07:00
_, cid := chunks.HeadChunkRef(meta.Ref).Unpack()
// ix represents the index of the chunk in the s.ooo.oooMmappedChunks slice. Chunk IDs are
// incremented by 1 when a new chunk is created, hence (cid - firstOOOChunkID) gives the slice index.
// The max index for the s.ooo.oooMmappedChunks slice can be len(s.ooo.oooMmappedChunks)-1, hence if ix
// is len(s.ooo.oooMmappedChunks), it represents the next chunk, which is the head chunk.
2022-12-28 02:19:41 -08:00
ix := int(cid) - int(s.ooo.firstOOOChunkID)
if ix < 0 || ix > len(s.ooo.oooMmappedChunks) {
2022-09-20 10:05:50 -07:00
return nil, storage.ErrNotFound
}
2022-12-28 02:19:41 -08:00
if ix == len(s.ooo.oooMmappedChunks) {
if s.ooo.oooHeadChunk == nil {
2022-09-20 10:05:50 -07:00
return nil, errors.New("invalid ooo head chunk")
}
}
// We create a temporary slice of chunk metas to hold the information of all
// possible chunks that may overlap with the requested chunk.
2022-12-28 02:19:41 -08:00
tmpChks := make([]chunkMetaAndChunkDiskMapperRef, 0, len(s.ooo.oooMmappedChunks))
2022-09-20 10:05:50 -07:00
2022-12-28 02:19:41 -08:00
oooHeadRef := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks))))
if s.ooo.oooHeadChunk != nil && s.ooo.oooHeadChunk.OverlapsClosedInterval(mint, maxt) {
2022-09-20 10:05:50 -07:00
// We only want to append the head chunk if this chunk existed when
// Series() was called. This brings consistency in case new data
// is added in between Series() and Chunk() calls.
if oooHeadRef == meta.OOOLastRef {
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
meta: chunks.Meta{
// Ignoring samples added before and after the last known min and max time for this chunk.
MinTime: meta.OOOLastMinTime,
MaxTime: meta.OOOLastMaxTime,
Ref: oooHeadRef,
},
})
}
}
2022-12-28 02:19:41 -08:00
for i, c := range s.ooo.oooMmappedChunks {
2022-09-20 10:05:50 -07:00
chunkRef := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i)))
// We can skip chunks that came in later than the last known OOOLastRef.
if chunkRef > meta.OOOLastRef {
break
}
style: Replace `else if` cascades with `switch`
Wiser coders than myself have come to the conclusion that a `switch`
statement is almost always superior to a statement that includes any
`else if`.
The exceptions that I have found in our codebase are just these two:
* The `if else` is followed by an additional statement before the next
condition (separated by a `;`).
* The whole thing is within a `for` loop and `break` statements are
used. In this case, using `switch` would require tagging the `for`
loop, which probably tips the balance.
Why are `switch` statements more readable?
For one, fewer curly braces. But more importantly, the conditions all
have the same alignment, so the whole thing follows the natural flow
of going down a list of conditions. With `else if`, in contrast, all
conditions but the first are "hidden" behind `} else if `, harder to
spot and (for no good reason) presented differently from the first
condition.
I'm sure the aforementioned wise coders can list even more reasons.
In any case, I like it so much that I have found myself recommending
it in code reviews. I would like to make it a habit in our code base,
without making it a hard requirement that we would test on the CI. But
for that, there has to be a role model, so this commit eliminates all
`if else` occurrences, unless it is autogenerated code or fits one of
the exceptions above.
Signed-off-by: beorn7 <beorn@grafana.com>
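A tiny before/after illustration of the rewrite this commit advocates (a made-up example, not taken from the actual diff):

// Before: the conditions after the first are hidden behind "} else if".
func classifyElseIf(t, minT, maxT int64) string {
	if t < minT {
		return "before"
	} else if t > maxT {
		return "after"
	} else {
		return "inside"
	}
}

// After: an untagged switch aligns all conditions at the same depth.
func classifySwitch(t, minT, maxT int64) string {
	switch {
	case t < minT:
		return "before"
	case t > maxT:
		return "after"
	default:
		return "inside"
	}
}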
2023-04-12 07:14:31 -07:00
switch {
case chunkRef == meta.OOOLastRef:
2022-09-20 10:05:50 -07:00
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
meta: chunks.Meta{
MinTime: meta.OOOLastMinTime,
MaxTime: meta.OOOLastMaxTime,
Ref: chunkRef,
},
ref: c.ref,
origMinT: c.minTime,
origMaxT: c.maxTime,
})
2023-04-12 07:14:31 -07:00
case c.OverlapsClosedInterval(mint, maxt):
2022-09-20 10:05:50 -07:00
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
meta: chunks.Meta{
MinTime: c.minTime,
MaxTime: c.maxTime,
Ref: chunkRef,
},
ref: c.ref,
})
}
}
// Next we want to sort all the collected chunks by min time so we can find
// those that overlap and stop when we know the rest don't.
2023-07-08 05:45:56 -07:00
slices.SortFunc(tmpChks, refLessByMinTimeAndMinRef)
2022-09-20 10:05:50 -07:00
mc := &mergedOOOChunks{}
absoluteMax := int64(math.MinInt64)
for _, c := range tmpChks {
2023-11-28 02:14:29 -08:00
if c.meta.Ref != meta.Ref && (len(mc.chunkIterables) == 0 || c.meta.MinTime > absoluteMax) {
2022-09-20 10:05:50 -07:00
continue
}
2023-11-28 02:14:29 -08:00
var iterable chunkenc.Iterable
2022-09-20 10:05:50 -07:00
if c.meta.Ref == oooHeadRef {
var xor *chunkenc.XORChunk
2023-11-28 02:14:29 -08:00
var err error
2022-09-20 10:05:50 -07:00
// If head chunk min and max time match the meta OOO markers
// that means that the chunk has not expanded so we can append
// it as it is.
2022-12-28 02:19:41 -08:00
if s.ooo.oooHeadChunk.minTime == meta.OOOLastMinTime && s.ooo.oooHeadChunk.maxTime == meta.OOOLastMaxTime {
xor, err = s.ooo.oooHeadChunk.chunk.ToXOR() // TODO(jesus.vazquez) (This is an optimization idea that has no priority and might not be that useful) See if we could use a copy of the underlying slice. That would leave the more expensive ToXOR() function only for the use case where Bytes() is called.
2022-09-20 10:05:50 -07:00
} else {
// We need to remove samples that are outside the markers.
2022-12-28 02:19:41 -08:00
xor, err = s.ooo.oooHeadChunk.chunk.ToXORBetweenTimestamps(meta.OOOLastMinTime, meta.OOOLastMaxTime)
2022-09-20 10:05:50 -07:00
}
if err != nil {
2023-11-16 10:54:41 -08:00
return nil , fmt . Errorf ( "failed to convert ooo head chunk to xor chunk: %w" , err )
2022-09-20 10:05:50 -07:00
}
2023-11-28 02:14:29 -08:00
iterable = xor
2022-09-20 10:05:50 -07:00
} else {
chk , err := cdm . Chunk ( c . ref )
if err != nil {
2023-11-16 10:54:41 -08:00
var cerr * chunks . CorruptionErr
if errors . As ( err , & cerr ) {
return nil , fmt . Errorf ( "invalid ooo mmapped chunk: %w" , err )
2022-09-20 10:05:50 -07:00
}
return nil, err
}
if c.meta.Ref == meta.OOOLastRef &&
(c.origMinT != meta.OOOLastMinTime || c.origMaxT != meta.OOOLastMaxTime) {
// The head expanded and was memory-mapped, so now we need to
// wrap the chunk in one that doesn't let us iterate through
// samples outside the OOOLastMinT and OOOLastMaxT markers.
2023-11-28 02:14:29 -08:00
iterable = boundedIterable{chk, meta.OOOLastMinTime, meta.OOOLastMaxTime}
2022-09-20 10:05:50 -07:00
} else {
2023-11-28 02:14:29 -08:00
iterable = chk
2022-09-20 10:05:50 -07:00
}
}
2023-11-28 02:14:29 -08:00
mc.chunkIterables = append(mc.chunkIterables, iterable)
2022-09-20 10:05:50 -07:00
if c.meta.MaxTime > absoluteMax {
absoluteMax = c.meta.MaxTime
}
}
return mc, nil
}
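The chunk-selection loop above reduces to a simple rule: sort candidates by minimum time, always keep the requested chunk, and keep any other chunk only while it overlaps the running maximum of what has been kept so far. A simplified, self-contained sketch of that rule (metaSketch is an illustrative type, not the real chunks.Meta):

package sketch

import (
	"math"
	"sort"
)

type metaSketch struct {
	minT, maxT int64
	requested  bool // true for the chunk the caller actually asked for
}

// selectOverlapping mirrors the loop above: the requested chunk is always kept,
// and any other chunk is kept only while it still overlaps the merged range.
func selectOverlapping(chks []metaSketch) []metaSketch {
	sort.Slice(chks, func(i, j int) bool { return chks[i].minT < chks[j].minT })

	var out []metaSketch
	absoluteMax := int64(math.MinInt64)
	for _, c := range chks {
		if !c.requested && (len(out) == 0 || c.minT > absoluteMax) {
			continue // disjoint from everything selected so far
		}
		out = append(out, c)
		if c.maxT > absoluteMax {
			absoluteMax = c.maxT
		}
	}
	return out
}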
2023-11-28 02:14:29 -08:00
var _ chunkenc.Iterable = &mergedOOOChunks{}
2022-09-20 10:05:50 -07:00
2023-11-28 02:14:29 -08:00
// mergedOOOChunks holds the list of iterables for overlapping chunks.
2022-09-20 10:05:50 -07:00
type mergedOOOChunks struct {
2023-11-28 02:14:29 -08:00
chunkIterables []chunkenc.Iterable
2022-09-20 10:05:50 -07:00
}
func (o mergedOOOChunks) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator {
2023-11-28 02:14:29 -08:00
return storage.ChainSampleIteratorFromIterables(iterator, o.chunkIterables)
2022-09-20 10:05:50 -07:00
}
2023-11-28 02:14:29 -08:00
var _ chunkenc.Iterable = &boundedIterable{}
2022-09-20 10:05:50 -07:00
2023-11-28 02:14:29 -08:00
// boundedIterable is an implementation of chunkenc.Iterable that uses a
2022-09-20 10:05:50 -07:00
// boundedIterator that only iterates through samples whose timestamps are
2023-10-03 13:09:25 -07:00
// >= minT and <= maxT.
2023-11-28 02:14:29 -08:00
type boundedIterable struct {
chunk chunkenc.Chunk
minT int64
maxT int64
2022-09-20 10:05:50 -07:00
}
2023-11-28 02:14:29 -08:00
func (b boundedIterable) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator {
it := b.chunk.Iterator(iterator)
2022-09-20 10:05:50 -07:00
if it == nil {
panic("iterator shouldn't be nil")
}
return boundedIterator{it, b.minT, b.maxT}
}
var _ chunkenc.Iterator = &boundedIterator{}
// boundedIterator is an implementation of Iterator that only iterates through
2023-10-03 13:09:25 -07:00
// samples whose timestamps are >= minT and <= maxT.
2022-09-20 10:05:50 -07:00
type boundedIterator struct {
chunkenc.Iterator
minT int64
maxT int64
}
// Next, the first time it is called, advances as many positions as necessary
// until it finds a sample within the bounds minT and maxT.
// If there are samples within bounds it will advance one by one amongst them.
// If there are no samples within bounds it will return chunkenc.ValNone.
2022-10-11 09:35:35 -07:00
func (b boundedIterator) Next() chunkenc.ValueType {
for b.Iterator.Next() == chunkenc.ValFloat {
2022-09-20 10:05:50 -07:00
t, _ := b.Iterator.At()
2023-04-12 07:14:31 -07:00
switch {
case t < b.minT:
2022-09-20 10:05:50 -07:00
continue
2023-04-12 07:14:31 -07:00
case t > b.maxT:
2022-10-11 09:35:35 -07:00
return chunkenc.ValNone
2023-04-12 07:14:31 -07:00
default:
return chunkenc.ValFloat
2022-09-20 10:05:50 -07:00
}
}
2022-10-11 09:35:35 -07:00
return chunkenc.ValNone
2022-09-20 10:05:50 -07:00
}
2022-10-11 09:35:35 -07:00
func (b boundedIterator) Seek(t int64) chunkenc.ValueType {
2022-09-20 10:05:50 -07:00
if t < b.minT {
// We must seek at least up to b.minT if it is asked for something before that.
2022-10-11 09:35:35 -07:00
val := b.Iterator.Seek(b.minT)
if val != chunkenc.ValFloat {
return chunkenc.ValNone
2022-09-20 10:05:50 -07:00
}
t, _ := b.Iterator.At()
2022-10-11 09:35:35 -07:00
if t <= b.maxT {
return chunkenc.ValFloat
}
2022-09-20 10:05:50 -07:00
}
if t > b.maxT {
// We seek anyway so that subsequent Next() calls will also return chunkenc.ValNone.
b.Iterator.Seek(t)
2022-10-11 09:35:35 -07:00
return chunkenc.ValNone
2022-09-20 10:05:50 -07:00
}
return b.Iterator.Seek(t)
}
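For reference, a hedged usage sketch of boundedIterator (it assumes the same package context, where chunkenc is already imported, since boundedIterator is unexported): it wraps an XOR chunk holding samples at t = 1..5 and only the samples with 2 <= t <= 4 come back.

func exampleBoundedIterator() ([]int64, error) {
	chk := chunkenc.NewXORChunk()
	app, err := chk.Appender()
	if err != nil {
		return nil, err
	}
	for t := int64(1); t <= 5; t++ {
		app.Append(t, float64(t)) // samples at t = 1..5
	}

	it := boundedIterator{chk.Iterator(nil), 2, 4} // keep only 2 <= t <= 4
	var ts []int64
	for it.Next() == chunkenc.ValFloat {
		t, _ := it.At()
		ts = append(ts, t)
	}
	return ts, nil // ts == [2 3 4]
}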
2023-10-03 13:09:25 -07:00
// safeHeadChunk makes sure that the chunk can be accessed without a race condition.
2023-05-19 01:24:06 -07:00
type safeHeadChunk struct {
2021-08-03 05:14:26 -07:00
chunkenc.Chunk
2023-02-21 01:30:11 -08:00
s *memSeries
cid chunks.HeadChunkID
isoState *isolationState
2021-08-03 05:14:26 -07:00
}
2023-05-19 01:24:06 -07:00
func (c *safeHeadChunk) Iterator(reuseIter chunkenc.Iterator) chunkenc.Iterator {
2021-08-03 05:14:26 -07:00
c.s.Lock()
2023-02-21 01:30:11 -08:00
it := c.s.iterator(c.cid, c.Chunk, c.isoState, reuseIter)
2021-08-03 05:14:26 -07:00
c.s.Unlock()
return it
}
2021-11-17 05:05:10 -08:00
// iterator returns a chunk iterator for the requested chunkID, or a NopIterator if the requested ID is out of range.
2021-08-03 05:14:26 -07:00
// It is unsafe to call this concurrently with s.append(...) without holding the series lock.
2023-02-21 01:30:11 -08:00
func (s *memSeries) iterator(id chunks.HeadChunkID, c chunkenc.Chunk, isoState *isolationState, it chunkenc.Iterator) chunkenc.Iterator {
2021-11-17 05:05:10 -08:00
ix := int(id) - int(s.firstChunkID)
2021-08-03 05:14:26 -07:00
2023-02-21 01:30:11 -08:00
numSamples := c.NumSamples()
2021-08-03 05:14:26 -07:00
stopAfter := numSamples
2021-11-19 02:11:32 -08:00
if isoState != nil && !isoState.IsolationDisabled() {
2021-08-03 05:14:26 -07:00
totalSamples := 0 // Total samples in this series.
previousSamples := 0 // Samples before this chunk.
for j, d := range s.mmappedChunks {
totalSamples += int(d.numSamples)
if j < ix {
previousSamples += int(d.numSamples)
}
}
2023-07-31 02:10:24 -07:00
ix -= len(s.mmappedChunks)
if s.headChunks != nil {
// Iterate all head chunks from the oldest to the newest.
headChunksLen := s.headChunks.len()
for j := headChunksLen - 1; j >= 0; j-- {
chk := s.headChunks.atOffset(j)
chkSamples := chk.chunk.NumSamples()
totalSamples += chkSamples
// The chunk ID is len(s.mmappedChunks) + (headChunks list position), where the list
// position is zero for the oldest chunk and s.headChunks.len()-1 for the newest (open) chunk.
if headChunksLen-1-j < ix {
previousSamples += chkSamples
}
}
2021-08-03 05:14:26 -07:00
}
// Remove from the total the transactionIDs that are relevant for samples
// that come after this chunk.
remote write 2.0: sync with `main` branch (#13510)
* consoles: exclude iowait and steal from CPU Utilisation
'iowait' and 'steal' indicate specific idle/wait states, which shouldn't
be counted into CPU Utilisation. Also see
https://github.com/prometheus-operator/kube-prometheus/pull/796 and
https://github.com/kubernetes-monitoring/kubernetes-mixin/pull/667.
Per the iostat man page:
%idle
Show the percentage of time that the CPU or CPUs were idle and the
system did not have an outstanding disk I/O request.
%iowait
Show the percentage of time that the CPU or CPUs were idle during
which the system had an outstanding disk I/O request.
%steal
Show the percentage of time spent in involuntary wait by the
virtual CPU or CPUs while the hypervisor was servicing another
virtual processor.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
* tsdb: shrink txRing with smaller integers
4 billion active transactions ought to be enough for anyone.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* tsdb: create isolation transaction slice on demand
When Prometheus restarts it creates every series read in from the WAL,
but many of those series will be finished, and never receive any more
samples. By defering allocation of the txRing slice to when it is first
needed, we save 32 bytes per stale series.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
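The on-demand slice creation described above is a lazy-allocation pattern; a hedged sketch of the idea (txTracker and its fields are illustrative, not the real txRing code):

type txTracker struct {
	txIDs []uint32 // nil until the series actually receives an append
}

// record allocates the ring only on first use, so series restored from the
// WAL that never get another sample never pay for the slice.
func (t *txTracker) record(appendID uint32, ringSize int) {
	if t.txIDs == nil {
		t.txIDs = make([]uint32, 0, ringSize)
	}
	t.txIDs = append(t.txIDs, appendID)
}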
* add cluster variable to Overview dashboard
Signed-off-by: Erik Sommer <ersotech@posteo.de>
* promql: simplify Native Histogram arithmetics
Signed-off-by: Linas Medziunas <linas.medziunas@gmail.com>
* Cut 2.49.0-rc.0 (#13270)
* Cut 2.49.0-rc.0
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Removed the duplicate.
Signed-off-by: bwplotka <bwplotka@gmail.com>
---------
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Add unit protobuf parser
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Go on adding protobuf parsing for unit
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* ui: create a reproduction for https://github.com/prometheus/prometheus/issues/13292
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Get conditional right
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Get VM Scale Set NIC (#13283)
Calling `*armnetwork.InterfacesClient.Get()` doesn't work for Scale Set
VM NIC, because these use a different Resource ID format.
Use `*armnetwork.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface()`
instead. This needs both the scale set name and the instance ID, so
add an `InstanceID` field to the `virtualMachine` struct. `InstanceID`
is empty for a VM that isn't a ScaleSetVM.
Signed-off-by: Daniel Nicholls <daniel.nicholls@resdiary.com>
* Cut v2.49.0-rc.1
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Delete debugging lines, amend error message for unit
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Correct order in error message
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Consider storage.ErrTooOldSample as non-retryable
Signed-off-by: Daniel Kerbel <nmdanny@gmail.com>
* scrape_test.go: Increase scrape interval in TestScrapeLoopCache to reduce potential flakiness
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Avoid creating string for suffix, consider counters without _total suffix
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* build(deps): bump github.com/prometheus/client_golang
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.17.0 to 1.18.0.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.17.0...v1.18.0)
---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] <support@github.com>
* build(deps): bump actions/setup-node from 3.8.1 to 4.0.1
Bumps [actions/setup-node](https://github.com/actions/setup-node) from 3.8.1 to 4.0.1.
- [Release notes](https://github.com/actions/setup-node/releases)
- [Commits](https://github.com/actions/setup-node/compare/5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d...b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8)
---
updated-dependencies:
- dependency-name: actions/setup-node
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
* scripts: sort file list in embed directive
Otherwise the resulting string depends on find, which afaict depends on
the underlying filesystem. A stable file list makes it easier to detect
UI changes in downstreams that need to track UI assets.
Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
* Fix DataTableProps['data'] for resultType string
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
* Fix handling of scalar and string in isHeatmapData
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
* build(deps): bump github.com/influxdata/influxdb
Bumps [github.com/influxdata/influxdb](https://github.com/influxdata/influxdb) from 1.11.2 to 1.11.4.
- [Release notes](https://github.com/influxdata/influxdb/releases)
- [Commits](https://github.com/influxdata/influxdb/compare/v1.11.2...v1.11.4)
---
updated-dependencies:
- dependency-name: github.com/influxdata/influxdb
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] <support@github.com>
* build(deps): bump github.com/prometheus/prometheus
Bumps [github.com/prometheus/prometheus](https://github.com/prometheus/prometheus) from 0.48.0 to 0.48.1.
- [Release notes](https://github.com/prometheus/prometheus/releases)
- [Changelog](https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/prometheus/compare/v0.48.0...v0.48.1)
---
updated-dependencies:
- dependency-name: github.com/prometheus/prometheus
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] <support@github.com>
* Bump client_golang to v1.18.0 (#13373)
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Drop old inmemory samples (#13002)
* Drop old inmemory samples
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Avoid copying timeseries when the feature is disabled
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Run gofmt
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Clarify docs
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Add more logging info
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Remove loggers
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* optimize function and add tests
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Simplify filter
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* rename var
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Update help info from metrics
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* use metrics to keep track of drop elements during buildWriteRequest
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* rename var in tests
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* pass time.Now as parameter
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Change buildwriterequest during retries
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Revert "Remove loggers"
This reverts commit 54f91dfcae20488944162335ab4ad8be459df1ab.
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* use log level debug for loggers
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Fix linter
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Remove noisy debug-level logs; add 'reason' label to drop metrics
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Remove accidentally committed files
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Propagate logger to buildWriteRequest to log dropped data
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Fix docs comment
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Make drop reason more specific
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Remove unnecessary pass of logger
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Use snake_case for reason label
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Fix dropped samples metric
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
---------
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
* fix(discovery): allow requireUpdate util to timeout in discovery/file/file_test.go.
The loop ran indefinitely if the condition wasn't met.
Before, each iteration created a new timer channel which was always outpaced by
the other timer channel with smaller duration.
minor detail: There was a memory leak: resources of the ~10 previous timers were
constantly kept. With the fix, we may keep the resources of one timer around for defaultWait
but this isn't worth the changes to make it right.
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Merge pull request #13371 from kevinmingtarja/fix-isHeatmapData
ui: fix handling of scalar and string in isHeatmapData
* tsdb/{index,compact}: allow using custom postings encoding format (#13242)
* tsdb/{index,compact}: allow using custom postings encoding format
We would like to experiment with a different postings encoding format in
Thanos so in this change I am proposing adding another argument to
`NewWriter` which would allow users to change the format if needed.
Also, wire the leveled compactor so that it would be possible to change
the format there too.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb/compact: use a struct for leveled compactor options
As discussed on Slack, let's use a struct for the options in leveled
compactor.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb: make changes after Bryan's review
- Make changes less intrusive
- Turn the postings encoder type into a function
- Add NewWriterWithEncoder()
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
---------
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Cut 2.49.0-rc.2
Signed-off-by: bwplotka <bwplotka@gmail.com>
* build(deps): bump actions/setup-go from 3.5.0 to 5.0.0 in /scripts (#13362)
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 3.5.0 to 5.0.0.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](https://github.com/actions/setup-go/compare/6edd4406fa81c3da01a34fa6f6343087c207a568...0c52d547c9bc32b1aa3301fd7a9cb496313a4491)
---
updated-dependencies:
- dependency-name: actions/setup-go
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump github/codeql-action from 2.22.8 to 3.22.12 (#13358)
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 2.22.8 to 3.22.12.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/github/codeql-action/compare/407ffafae6a767df3e0230c3df91b6443ae8df75...012739e5082ff0c22ca6d6ab32e07c36df03c4a4)
---
updated-dependencies:
- dependency-name: github/codeql-action
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* put @nexucis has a release shepherd (#13383)
Signed-off-by: Augustin Husson <augustin.husson@amadeus.com>
* Add analyze histograms command to promtool (#12331)
Add `query analyze` command to promtool
This command analyzes the buckets of classic and native histograms,
based on data queried from the Prometheus query API, i.e. it
doesn't require direct access to the TSDB files.
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* included instance in all necessary descriptions
Signed-off-by: Erik Sommer <ersotech@posteo.de>
* tsdb/compact: fix passing merge func
Fixing a very small logical problem I've introduced :(.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb: add enable overlapping compaction
This functionality is needed in downstream projects because they have a
separate component that does compaction.
Upstreaming
https://github.com/grafana/mimir-prometheus/blob/7c8e9a2a76fc729e9078889782928b2fdfe240e9/tsdb/compact.go#L323-L325.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Cut 2.49.0
Signed-off-by: bwplotka <bwplotka@gmail.com>
* promtool: allow setting multiple matchers to "promtool tsdb dump" command. (#13296)
Conditions are ANDed inside the same matcher but matchers are ORed
Including unit tests for "promtool tsdb dump".
Refactor some matchers scraping utils.
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Fixed changelog
Signed-off-by: bwplotka <bwplotka@gmail.com>
* tsdb/main: wire "EnableOverlappingCompaction" to tsdb.Options (#13398)
This added the https://github.com/prometheus/prometheus/pull/13393
"EnableOverlappingCompaction" parameter to the compactor code but not to
the tsdb.Options. I forgot about that. Add it to `tsdb.Options` too and
set it to `true` in Prometheus.
Copy/paste the description from
https://github.com/prometheus/prometheus/pull/13393#issuecomment-1891787986
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Issue #13268: fix quality value in accept header
Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
* Cut 2.49.1 with scrape q= bugfix.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Cut 2.49.1 web package.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Restore more efficient version of NewPossibleNonCounterInfo annotation (#13022)
Restore more efficient version of NewPossibleNonCounterInfo annotation
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* Fix regressions introduced by #13242
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* fix slice copy in 1.20 (#13389)
The slices package is added to the standard library in Go 1.21;
we need to import from the exp area to maintain compatibility with Go 1.20.
Signed-off-by: tyltr <tylitianrui@126.com>
* Docs: Query Basics: link to rate (#10538)
Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu>
* chore(kubernetes): check preconditions earlier and avoid unnecessary checks or iterations
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Examples: link to `rate` for new users (#10535)
* Examples: link to `rate` for new users
Signed-off-by: Ted Robertson 10043369+tredondo@users.noreply.github.com
Co-authored-by: Bryan Boreham <bjboreham@gmail.com>
* promql: use natural sort in sort_by_label and sort_by_label_desc (#13411)
These functions are intended for humans, as robots can already sort the results
however they please. Humans like things sorted "naturally":
* https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/
A similar thing has been done to Grafana, which is also used by humans:
* https://github.com/grafana/grafana/pull/78024
* https://github.com/grafana/grafana/pull/78494
Signed-off-by: Ivan Babrou <github@ivan.computer>
* TestLabelValuesWithMatchers: Add test case
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* remove obsolete build tag
Signed-off-by: tyltr <tylitianrui@126.com>
* Upgrade some golang dependencies for resty 2.11
Signed-off-by: Israel Blancas <iblancasa@gmail.com>
* Native Histograms: support `native_histogram_min_bucket_factor` in scrape_config (#13222)
Native Histograms: support native_histogram_min_bucket_factor in scrape_config
---------
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
* Add warnings for histogramRate applied with isCounter not matching counter/gauge histogram (#13392)
Add warnings for histogramRate applied with isCounter not matching counter/gauge histogram
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* Minor fixes to otlp vendor update script
Signed-off-by: Goutham <gouthamve@gmail.com>
* build(deps): bump github.com/hetznercloud/hcloud-go/v2
Bumps [github.com/hetznercloud/hcloud-go/v2](https://github.com/hetznercloud/hcloud-go) from 2.4.0 to 2.6.0.
- [Release notes](https://github.com/hetznercloud/hcloud-go/releases)
- [Changelog](https://github.com/hetznercloud/hcloud-go/blob/main/CHANGELOG.md)
- [Commits](https://github.com/hetznercloud/hcloud-go/compare/v2.4.0...v2.6.0)
---
updated-dependencies:
- dependency-name: github.com/hetznercloud/hcloud-go/v2
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] <support@github.com>
* Enhanced visibility for `promtool test rules` with JSON colored formatting (#13342)
* Added diff flag for unit test to improvise readability & debugging
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Removed blank spaces
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Fixed linting error
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Added cli flags to documentation
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Revert unrelated linting fixes
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Fixed review suggestions
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Cleanup
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Updated flag description
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Updated flag description
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
---------
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* storage: skip merging when no remote storage configured
Prometheus is hard-coded to use a fanout storage between TSDB and
a remote storage which by default is empty.
This change detects the empty storage and skips merging between
result sets, which would make `Select()` sort results.
Bottom line: we skip a sort unless there really is some remote storage
configured.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
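A hedged sketch of that short-circuit (the helper below is illustrative; the real wiring in Prometheus differs, but storage.NewFanout is the fanout constructor being bypassed when there are no secondaries):

package sketch

import (
	"github.com/go-kit/log"
	"github.com/prometheus/prometheus/storage"
)

// pickStorage returns the primary storage directly when no remote storage is
// configured, so queries skip the fanout merge (and the sort it forces).
func pickStorage(logger log.Logger, primary storage.Storage, secondaries ...storage.Storage) storage.Storage {
	if len(secondaries) == 0 {
		return primary
	}
	return storage.NewFanout(logger, primary, secondaries...)
}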
* Remove csmarchbanks from remote write owners (#13432)
I have not had the time to keep up with remote write and have no plans
to work on it in the near future so I am withdrawing my maintainership
of that part of the codebase. I continue to focus on client_python.
Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
* add more context cancellation check at evaluation time
Signed-off-by: Ben Ye <benye@amazon.com>
* Optimize label values with matchers by taking shortcuts (#13426)
Don't calculate postings beforehand: we may not need them. If all
matchers are for the requested label, we can just filter its values.
Also, if there are no values at all, no need to run any kind of
logic.
Also add more labelValuesWithMatchers benchmarks
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Add automatic memory limit handling
Enable automatic detection of memory limits and configure GOMEMLIMIT to
match.
* Also includes a flag to allow controlling the reserved ratio.
Signed-off-by: SuperQ <superq@gmail.com>
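Conceptually, the automatic memory limit handling resolves a limit and applies the reserved ratio before handing the rest to the Go runtime; a hedged sketch using the standard runtime/debug API (detection of the actual cgroup limit and the flag name are out of scope here and purely illustrative):

package sketch

import "runtime/debug"

// applyMemLimit sets GOMEMLIMIT to a fraction of the detected limit, keeping
// some headroom reserved for non-Go memory.
func applyMemLimit(detectedBytes int64, reservedRatio float64) {
	if detectedBytes <= 0 {
		return // no limit detected; leave the runtime default in place
	}
	limit := int64(float64(detectedBytes) * (1 - reservedRatio))
	debug.SetMemoryLimit(limit)
}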
* Update OSSF badge link (#13433)
Provide a more user friendly interface
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
* SD Managers taking over responsibility for registration of debug metrics (#13375)
SD Managers take over responsibility for SD metrics registration
---------
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
* Optimize histogram iterators (#13340)
Optimize histogram iterators
Histogram iterators allocate new objects in the AtHistogram and
AtFloatHistogram methods, which makes calculating rates over long
ranges expensive.
In #13215 we allowed an existing object to be reused
when converting an integer histogram to a float histogram. This commit follows
the same idea and allows injecting an existing object in the AtHistogram and
AtFloatHistogram methods. When the injected value is nil, iterators allocate
new histograms, otherwise they populate and return the injected object.
The commit also adds a CopyTo method to Histogram and FloatHistogram which
is used in the BufferedIterator to overwrite items in the ring instead of making
new copies.
Note that a specialized HPoint pool is needed for all of this to work
(`matrixSelectorHPool`).
---------
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
* doc: Mark `mad_over_time` as experimental (#13440)
We forgot to do that in
https://github.com/prometheus/prometheus/pull/13059
Signed-off-by: beorn7 <beorn@grafana.com>
* Change metric label for Puppetdb from 'http' to 'puppetdb'
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
* mirror metrics.proto change & generate code
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
* TestHeadLabelValuesWithMatchers: Add test case (#13414)
Add test case to TestHeadLabelValuesWithMatchers, while fixing a couple
of typos in other test cases. Also enclosing some implicit sub-tests in a
`t.Run` call to make them explicitly sub-tests.
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* update all go dependencies (#13438)
Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
* build(deps): bump the k8s-io group with 2 updates (#13454)
Bumps the k8s-io group with 2 updates: [k8s.io/api](https://github.com/kubernetes/api) and [k8s.io/client-go](https://github.com/kubernetes/client-go).
Updates `k8s.io/api` from 0.28.4 to 0.29.1
- [Commits](https://github.com/kubernetes/api/compare/v0.28.4...v0.29.1)
Updates `k8s.io/client-go` from 0.28.4 to 0.29.1
- [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md)
- [Commits](https://github.com/kubernetes/client-go/compare/v0.28.4...v0.29.1)
---
updated-dependencies:
- dependency-name: k8s.io/api
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: k8s-io
- dependency-name: k8s.io/client-go
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: k8s-io
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump the go-opentelemetry-io group with 1 update (#13453)
Bumps the go-opentelemetry-io group with 1 update: [go.opentelemetry.io/collector/semconv](https://github.com/open-telemetry/opentelemetry-collector).
Updates `go.opentelemetry.io/collector/semconv` from 0.92.0 to 0.93.0
- [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases)
- [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md)
- [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.92.0...v0.93.0)
---
updated-dependencies:
- dependency-name: go.opentelemetry.io/collector/semconv
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: go-opentelemetry-io
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump actions/upload-artifact from 3.1.3 to 4.0.0 (#13355)
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 3.1.3 to 4.0.0.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/a8a3f3ad30e3422c9c7b888a15615d19a852ae32...c7d193f32edcb7bfad88892161225aeda64e9392)
---
updated-dependencies:
- dependency-name: actions/upload-artifact
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump bufbuild/buf-push-action (#13357)
Bumps [bufbuild/buf-push-action](https://github.com/bufbuild/buf-push-action) from 342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1 to a654ff18effe4641ebea4a4ce242c49800728459.
- [Release notes](https://github.com/bufbuild/buf-push-action/releases)
- [Commits](https://github.com/bufbuild/buf-push-action/compare/342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1...a654ff18effe4641ebea4a4ce242c49800728459)
---
updated-dependencies:
- dependency-name: bufbuild/buf-push-action
dependency-type: direct:production
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* Labels: Add DropMetricName function, used in PromQL (#13446)
This function is called very frequently when executing PromQL functions,
and we can do it much more efficiently inside Labels.
In the common case that `__name__` comes first in the labels, we simply
re-point to start at the next label, which is nearly free.
`DropMetricName` is now so cheap I removed the cache - benchmarks show
everything still goes faster.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
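A hedged sketch of the fast path described above, written against a plain sorted []labels.Label (the real Labels type under the stringlabels build is not a slice, so this only illustrates the idea):

package sketch

import "github.com/prometheus/prometheus/model/labels"

// dropMetricName removes __name__ from a sorted label slice. When __name__ is
// the first label (the common case), a re-slice is enough and nearly free.
func dropMetricName(lbls []labels.Label) []labels.Label {
	if len(lbls) > 0 && lbls[0].Name == labels.MetricName {
		return lbls[1:]
	}
	out := make([]labels.Label, 0, len(lbls))
	for _, l := range lbls {
		if l.Name != labels.MetricName {
			out = append(out, l)
		}
	}
	return out
}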
* tsdb: simplify internal series delete function (#13261)
Lifting an optimisation from Agent code, `seriesHashmap.del` can use
the unique series reference, doesn't need to check Labels.
Also streamline the logic for deleting from `unique` and `conflicts` maps,
and add some comments to help the next person.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* otlptranslator/update-copy.sh: Fix sed command lines
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* Rollback k8s.io requirements (#13462)
Rollback k8s.io Go modules to v0.28.6 to avoid forcing upgrade of Go to
1.21. This allows us to keep compatibility with the currently supported
upstream Go releases.
Signed-off-by: SuperQ <superq@gmail.com>
* Make update-copy.sh work for both OSX and GNU sed
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* Name @beorn7 and @krajorama as maintainers for native histograms
I have been the de-facto maintainer for native histograms from the
beginning. So let's put this into MAINTAINERS.md.
In addition, I hereby propose George Krajcsovits AKA Krajo as a
co-maintainer. He has contributed a lot of native histogram code, but
more importantly, he has contributed substantially to reviewing other
contributors' native histogram code, up to a point where I was merely
rubberstamping the PRs he had already reviewed. I'm confident that he
is ready to be granted commit rights as outlined in the
"Maintainers" section of the governance:
https://prometheus.io/governance/#maintainers
According to the same section of the governance, I will announce the
proposed change on the developers mailing list and will give some time
for lazy consensus before merging this PR.
Signed-off-by: beorn7 <beorn@grafana.com>
* ui/fix: correct url handling for stacked graphs (#13460)
Signed-off-by: Yury Moladau <yurymolodov@gmail.com>
* tsdb: use cheaper Mutex on series
Mutex is 8 bytes; RWMutex is 24 bytes and much more complicated. Since
`RLock` is only used in two places, `UpdateMetadata` and `Delete`,
neither of which are hotspots, we should use the cheaper one.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
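The size argument above is easy to check with unsafe.Sizeof; on 64-bit platforms sync.Mutex is 8 bytes and sync.RWMutex is 24:

package sketch

import (
	"fmt"
	"sync"
	"unsafe"
)

func printMutexSizes() {
	fmt.Println(unsafe.Sizeof(sync.Mutex{}))   // 8 on 64-bit platforms
	fmt.Println(unsafe.Sizeof(sync.RWMutex{})) // 24 on 64-bit platforms
}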
* Fix last_over_time for native histograms
The last_over_time function retains a histogram sample without making a copy.
That sample now comes from the buffered iterator used for windowing functions,
and its memory can be reused for reading subsequent samples as the iterator progresses.
I would propose copying the sample in the last_over_time function, similar to
how it is done for rate, sum_over_time and others.
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
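A sketch of the proposed copy, assuming the promql.HPoint and histogram.FloatHistogram types; the helper itself is illustrative, not the actual PromQL code:
package sketch

import (
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/promql"
)

// lastHistogram copies the histogram before retaining it, because the
// buffered iterator may reuse that sample's memory as it advances.
func lastHistogram(points []promql.HPoint) *histogram.FloatHistogram {
	return points[len(points)-1].H.Copy()
}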
* Implementation
NOTE:
Rebased from main after refactor in #13014
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Add feature flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactor concurrency control
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Optimising dependencies/dependents funcs to not produce new slices each request
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactoring
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Rename flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactoring for performance, and to allow controller to be overridden
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Block until all rules, both sync & async, have completed evaluating
Updated & added tests
Review feedback nits
Return empty map if not indeterminate
Use highWatermark to track inflight requests counter
Appease the linter
Clarify feature flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Fix typo in CLI flag description
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Fixed auto-generated doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Improve doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Simplify the design to update the concurrency controller once the rule evaluation is done
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Add more test cases to TestDependenciesEdgeCases
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Added more test cases to TestDependenciesEdgeCases
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Improved RuleConcurrencyController interface doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Introduced sequentialRuleEvalController
Signed-off-by: Marco Pracucci <marco@pracucci.com>
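For orientation, a minimal sketch of a controller with this shape, built on a weighted semaphore; the interface and names here are assumptions, not the actual Prometheus rules API:
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

// ruleConcurrencyController is an illustrative interface: it decides whether
// a rule may be evaluated concurrently and is told when that evaluation ends.
type ruleConcurrencyController interface {
	Allow(ctx context.Context) bool
	Done()
}

// concurrentController admits up to a fixed number of evaluations at once.
type concurrentController struct{ sema *semaphore.Weighted }

func (c *concurrentController) Allow(ctx context.Context) bool {
	// Non-blocking: if no slot is free, the rule is evaluated sequentially
	// instead of waiting for one.
	return c.sema.TryAcquire(1)
}

func (c *concurrentController) Done() { c.sema.Release(1) }

// sequentialController never allows concurrency; it stands in for the
// behaviour when the feature flag is off.
type sequentialController struct{}

func (sequentialController) Allow(context.Context) bool { return false }
func (sequentialController) Done()                      {}

func main() {
	var ctrl ruleConcurrencyController = &concurrentController{sema: semaphore.NewWeighted(4)}
	if ctrl.Allow(context.Background()) {
		defer ctrl.Done()
		fmt.Println("evaluating rule concurrently")
	}
}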
* Remove superfluous nil check in Group.metrics
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* api: Serialize discovered and target labels into JSON directly (#13469)
Converted maps into labels.Labels to avoid a lot of data copying, which led to very high memory consumption while opening the /service-discovery endpoint in the Prometheus UI.
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
* api: Serialize discovered labels into JSON directly in dropped targets (#13484)
Converted maps into labels.Labels to avoid a lot of data copying, which led to very high memory consumption while opening the /service-discovery endpoint in the Prometheus UI.
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
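A hedged sketch of the direct-serialization idea; the helper is illustrative and not the actual API code, but it shows labels being written straight to JSON without materializing a map[string]string per target:
package sketch

import (
	"bytes"
	"encoding/json"

	"github.com/prometheus/prometheus/model/labels"
)

// labelsJSON writes a labels.Labels value directly as a JSON object.
func labelsJSON(lset labels.Labels) []byte {
	var buf bytes.Buffer
	buf.WriteByte('{')
	first := true
	lset.Range(func(l labels.Label) {
		if !first {
			buf.WriteByte(',')
		}
		first = false
		name, _ := json.Marshal(l.Name) // JSON-escape name and value
		val, _ := json.Marshal(l.Value)
		buf.Write(name)
		buf.WriteByte(':')
		buf.Write(val)
	})
	buf.WriteByte('}')
	return buf.Bytes()
}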
* Add ShardedPostings() support to TSDB (#10421)
This PR is a reference implementation of the proposal described in #10420.
In addition to what is described in #10420, in this PR I've introduced labels.StableHash(). The idea is to offer a hashing function that doesn't change over time, which query sharding uses to get stable behaviour over time. The implementation of labels.StableHash() is the hashing function Prometheus used before stringlabels, and the one Grafana Mimir uses for query sharding (because it was built before stringlabels existed).
Follow-up work
As mentioned in #10420, if this PR is accepted I'm also open to uploading another fundamental piece used by Grafana Mimir query sharding to accelerate query execution: an optional, configurable and fast in-memory cache for the series hashes.
Signed-off-by: Marco Pracucci <marco@pracucci.com>
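A small sketch of how such a stable hash is typically used for sharding; labels.StableHash is the function introduced here, while the helper around it is illustrative:
package sketch

import "github.com/prometheus/prometheus/model/labels"

// inShard assigns a series to a query shard: with a hash that never changes
// across Prometheus versions, hash modulo shard count is a stable assignment.
func inShard(lset labels.Labels, shardIndex, shardCount uint64) bool {
	return labels.StableHash(lset)%shardCount == shardIndex
}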
* storage/remote: document why two benchmarks are skipped
One was silently doing nothing; one was doing something but the work
didn't go up linearly with iteration count.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* Pod status changes not discovered by Kube Endpoints SD (#13337)
* fix(discovery/kubernetes/endpoints): react to changes on Pods, because some modifications can occur on them without triggering an update on the related Endpoints (e.g. the Pod phase changing from Pending to Running).
---------
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com>
* Small improvements, add const, remove copypasta (#8106)
Signed-off-by: Mikhail Fesenko <proggga@gmail.com>
Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com>
* Proposal to improve FPointSlice and HPointSlice allocation. (#13448)
* Reusing the points slice from the previous series when the slice is under-utilized
* Adding comments on the bench test
Signed-off-by: Alan Protasio <alanprot@gmail.com>
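A hedged sketch of one possible reuse heuristic; the threshold and helper name are assumptions for illustration, not the PR's exact logic:
package sketch

import "github.com/prometheus/prometheus/promql"

// pointsSliceFor reuses the previous series' slice only when its capacity
// will be reasonably well utilized; a huge, mostly empty slice would pin
// memory for no benefit.
func pointsSliceFor(prev []promql.FPoint, needed int) []promql.FPoint {
	if cap(prev) >= needed && needed >= cap(prev)/2 {
		return prev[:0] // good fit: reuse without reallocating
	}
	return make([]promql.FPoint, 0, needed)
}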
* lint
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* go mod tidy
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
---------
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
Signed-off-by: Erik Sommer <ersotech@posteo.de>
Signed-off-by: Linas Medziunas <linas.medziunas@gmail.com>
Signed-off-by: bwplotka <bwplotka@gmail.com>
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
Signed-off-by: Daniel Nicholls <daniel.nicholls@resdiary.com>
Signed-off-by: Daniel Kerbel <nmdanny@gmail.com>
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Signed-off-by: Augustin Husson <augustin.husson@amadeus.com>
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Signed-off-by: Marco Pracucci <marco@pracucci.com>
Signed-off-by: tyltr <tylitianrui@126.com>
Signed-off-by: Ted Robertson <10043369+tredondo@users.noreply.github.com>
Signed-off-by: Ivan Babrou <github@ivan.computer>
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
Signed-off-by: Israel Blancas <iblancasa@gmail.com>
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Signed-off-by: Goutham <gouthamve@gmail.com>
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
Signed-off-by: Ben Ye <benye@amazon.com>
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
Signed-off-by: SuperQ <superq@gmail.com>
Signed-off-by: Ben Kochie <superq@gmail.com>
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
Signed-off-by: beorn7 <beorn@grafana.com>
Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
Signed-off-by: Yury Moladau <yurymolodov@gmail.com>
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Signed-off-by: Mikhail Fesenko <proggga@gmail.com>
Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com>
Signed-off-by: Alan Protasio <alanprot@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
Co-authored-by: Julian Wiedmann <jwi@linux.ibm.com>
Co-authored-by: Bryan Boreham <bjboreham@gmail.com>
Co-authored-by: Erik Sommer <ersotech@posteo.de>
Co-authored-by: Linas Medziunas <linas.medziunas@gmail.com>
Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
Co-authored-by: Arianna Vespri <arianna.vespri@yahoo.it>
Co-authored-by: machine424 <ayoubmrini424@gmail.com>
Co-authored-by: daniel-resdiary <109083091+daniel-resdiary@users.noreply.github.com>
Co-authored-by: Daniel Kerbel <nmdanny@gmail.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Jan Fajerski <jfajersk@redhat.com>
Co-authored-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Co-authored-by: Marc Tudurí <marctc@protonmail.com>
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Co-authored-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Co-authored-by: Augustin Husson <husson.augustin@gmail.com>
Co-authored-by: Björn Rabenstein <beorn@grafana.com>
Co-authored-by: zenador <zenador@users.noreply.github.com>
Co-authored-by: gotjosh <josue.abreu@gmail.com>
Co-authored-by: Ben Kochie <superq@gmail.com>
Co-authored-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Co-authored-by: Marco Pracucci <marco@pracucci.com>
Co-authored-by: tyltr <tylitianrui@126.com>
Co-authored-by: Ted Robertson <10043369+tredondo@users.noreply.github.com>
Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu>
Co-authored-by: Matthias Loibl <mail@matthiasloibl.com>
Co-authored-by: Ivan Babrou <github@ivan.computer>
Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com>
Co-authored-by: Israel Blancas <iblancasa@gmail.com>
Co-authored-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: Goutham <gouthamve@gmail.com>
Co-authored-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Co-authored-by: Chris Marchbanks <csmarchbanks@gmail.com>
Co-authored-by: Ben Ye <benye@amazon.com>
Co-authored-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Co-authored-by: Paulin Todev <paulin.todev@gmail.com>
Co-authored-by: Filip Petkovski <filip.petkovsky@gmail.com>
Co-authored-by: Yury Molodov <yurymolodov@gmail.com>
Co-authored-by: Danny Kopping <danny.kopping@grafana.com>
Co-authored-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com>
Co-authored-by: Mikhail Fesenko <proggga@gmail.com>
Co-authored-by: Alan Protasio <alanprot@gmail.com>
2024-02-02 10:38:50 -08:00
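// Discard the appendIDs that only cover samples written after this chunk:
// totalSamples - (previousSamples + numSamples) is the number of samples in
// later chunks, and each tracked appendID corresponds to one sample.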
appendIDsToConsider := int ( s . txs . txIDCount ) - ( totalSamples - ( previousSamples + numSamples ) )
2021-08-03 05:14:26 -07:00
// Iterate over the appendIDs, find the first one that the isolation state says not
// to return.
it := s . txs . iterator ( )
for index := 0 ; index < appendIDsToConsider ; index ++ {
appendID := it . At ( )
if appendID <= isoState . maxAppendID { // Easy check first.
if _ , ok := isoState . incompleteAppends [ appendID ] ; ! ok {
it . Next ( )
continue
}
}
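// This appendID is not visible to this isolation state, so its sample and
// everything appended after it must be hidden; return only the samples
// written by the earlier appendIDs.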
stopAfter = numSamples - ( appendIDsToConsider - index )
if stopAfter < 0 {
stopAfter = 0 // Stopped in a previous chunk.
}
break
}
}
if stopAfter == 0 {
return chunkenc . NewNopIterator ( )
}
2022-09-27 07:02:05 -07:00
if stopAfter == numSamples {
2023-02-21 01:30:11 -08:00
return c . Iterator ( it )
2021-08-03 05:14:26 -07:00
}
2023-02-21 01:30:11 -08:00
return makeStopIterator ( c , it , stopAfter )
2021-08-03 05:14:26 -07:00
}
2021-11-17 02:21:27 -08:00
// stopIterator wraps an Iterator, but only returns the first
// stopAfter values if initialized with i=-1.
2021-08-03 05:14:26 -07:00
type stopIterator struct {
chunkenc . Iterator
i , stopAfter int
}
2021-11-28 23:54:23 -08:00
func ( it * stopIterator ) Next ( ) chunkenc . ValueType {
2021-08-03 05:14:26 -07:00
if it . i + 1 >= it . stopAfter {
2021-11-28 23:54:23 -08:00
return chunkenc . ValNone
2021-08-03 05:14:26 -07:00
}
it . i ++
return it . Iterator . Next ( )
}
2022-10-05 13:14:49 -07:00
func makeStopIterator ( c chunkenc . Chunk , it chunkenc . Iterator , stopAfter int ) chunkenc . Iterator {
// Re-use the Iterator object if it is a stopIterator.
if stopIter , ok := it . ( * stopIterator ) ; ok {
stopIter . Iterator = c . Iterator ( stopIter . Iterator )
stopIter . i = - 1
stopIter . stopAfter = stopAfter
return stopIter
}
return & stopIterator {
Iterator : c . Iterator ( it ) ,
i : - 1 ,
stopAfter : stopAfter ,
}
}