// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tsdb

import (
	"context"
	"errors"
	"fmt"
	"math"

	"github.com/go-kit/log/level"

	"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
2021-11-08 06:23:17 -08:00
"github.com/prometheus/prometheus/model/labels"
2022-07-19 01:58:52 -07:00
"github.com/prometheus/prometheus/model/metadata"
2021-11-17 10:57:31 -08:00
"github.com/prometheus/prometheus/model/value"
2021-08-03 05:14:26 -07:00
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/record"
)

// initAppender is a helper to initialize the time bounds of the head
// upon the first sample it receives.
type initAppender struct {
	app  storage.Appender
	head *Head
}

var _ storage.GetRef = &initAppender{}

func (a *initAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
	if a.app != nil {
		return a.app.Append(ref, lset, t, v)
	}

	a.head.initTime(t)
	a.app = a.head.appender()

	return a.app.Append(ref, lset, t, v)
}

func (a *initAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
	// Check if exemplar storage is enabled.
	if !a.head.opts.EnableExemplarStorage || a.head.opts.MaxExemplars.Load() <= 0 {
		return 0, nil
	}

	if a.app != nil {
		return a.app.AppendExemplar(ref, l, e)
	}
	// We should never reach here given we would call Append before AppendExemplar
	// and we probably want to always base head/WAL min time on sample times.
	a.head.initTime(e.Ts)
	a.app = a.head.appender()

	return a.app.AppendExemplar(ref, l, e)
}

func (a *initAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
	if a.app != nil {
		return a.app.AppendHistogram(ref, l, t, h, fh)
	}
	a.head.initTime(t)
	a.app = a.head.appender()

	return a.app.AppendHistogram(ref, l, t, h, fh)
}

func (a *initAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
	if a.app != nil {
		return a.app.UpdateMetadata(ref, l, m)
	}

	a.app = a.head.appender()
	return a.app.UpdateMetadata(ref, l, m)
}

func (a *initAppender) AppendCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64) (storage.SeriesRef, error) {
	if a.app != nil {
		return a.app.AppendCTZeroSample(ref, lset, t, ct)
	}
	a.head.initTime(t)
	a.app = a.head.appender()

	return a.app.AppendCTZeroSample(ref, lset, t, ct)
}

// initTime initializes a head with the first timestamp. This only needs to be called
// for a completely fresh head with an empty WAL.
func (h *Head) initTime(t int64) {
	if !h.minTime.CompareAndSwap(math.MaxInt64, t) {
		return
	}
	// Ensure that max time is initialized to at least the min time we just set.
	// Concurrent appenders may already have set it to a higher value.
	h.maxTime.CompareAndSwap(math.MinInt64, t)
}

func (a *initAppender) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRef, labels.Labels) {
	if g, ok := a.app.(storage.GetRef); ok {
		return g.GetRef(lset, hash)
	}
	return 0, labels.EmptyLabels()
}

func (a *initAppender) Commit() error {
	if a.app == nil {
		a.head.metrics.activeAppenders.Dec()
		return nil
	}
	return a.app.Commit()
}

func (a *initAppender) Rollback() error {
	if a.app == nil {
		a.head.metrics.activeAppenders.Dec()
		return nil
	}
	return a.app.Rollback()
}

// Appender returns a new Appender on the database.
func (h *Head) Appender(_ context.Context) storage.Appender {
	h.metrics.activeAppenders.Inc()

	// The head cache might not have a starting point yet. The init appender
	// picks up the first appended timestamp as the base.
	if h.MinTime() == math.MaxInt64 {
		return &initAppender{
			head: h,
		}
	}
	return h.appender()
}
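
// A minimal usage sketch (illustrative only; "head", "lset", and "ts" are assumed
// to exist in the caller). Appended data is staged in the appender and only
// becomes visible to queries once Commit is called:
//
//	app := head.Appender(context.Background())
//	ref, err := app.Append(0, lset, ts, 42.0)
//	if err != nil {
//		_ = app.Rollback()
//		return err
//	}
//	_, _ = app.Append(ref, lset, ts+15_000, 43.0) // Reusing ref skips the series lookup.
//	return app.Commit()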

func (h *Head) appender() *headAppender {
	minValidTime := h.appendableMinValidTime()
	appendID, cleanupAppendIDsBelow := h.iso.newAppendID(minValidTime) // Every appender gets an ID that is cleared upon commit/rollback.

	// Allocate the exemplars buffer only if exemplars are enabled.
	var exemplarsBuf []exemplarWithSeriesRef
	if h.opts.EnableExemplarStorage {
		exemplarsBuf = h.getExemplarBuffer()
	}

	return &headAppender{
		head:                  h,
		minValidTime:          minValidTime,
		mint:                  math.MaxInt64,
		maxt:                  math.MinInt64,
		headMaxt:              h.MaxTime(),
		oooTimeWindow:         h.opts.OutOfOrderTimeWindow.Load(),
		samples:               h.getAppendBuffer(),
		sampleSeries:          h.getSeriesBuffer(),
		exemplars:             exemplarsBuf,
		histograms:            h.getHistogramBuffer(),
		floatHistograms:       h.getFloatHistogramBuffer(),
		metadata:              h.getMetadataBuffer(),
		appendID:              appendID,
		cleanupAppendIDsBelow: cleanupAppendIDsBelow,
	}
}

// appendableMinValidTime returns the minimum valid timestamp for appends,
// such that samples stay ahead of prior blocks and the head compaction window.
func (h *Head) appendableMinValidTime() int64 {
	// This boundary ensures that no samples will be added to the compaction window.
	// This allows race-free, concurrent appending and compaction.
	cwEnd := h.MaxTime() - h.chunkRange.Load()/2

	// This boundary ensures that we avoid overlapping timeframes from one block to the next.
	// While not necessary for correctness, it means we're not required to use vertical compaction.
	minValid := h.minValidTime.Load()

	return max(cwEnd, minValid)
}
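
// Worked example (assumed values, all in milliseconds): with a 2h chunk range
// (7_200_000) and a head max time of 10_000_000, the compaction window ends at
// 10_000_000 - 7_200_000/2 = 6_400_000. If minValidTime (roughly the end of the
// most recently persisted block) is 5_000_000, the result is
// max(6_400_000, 5_000_000) = 6_400_000, so in-order appends below that
// timestamp are rejected as out of bounds (unless the out-of-order window
// admits them).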

// AppendableMinValidTime returns the minimum valid time for samples to be appended to the Head.
// It returns false if the Head hasn't been initialized yet and the minimum time is therefore not known.
func (h *Head) AppendableMinValidTime() (int64, bool) {
	if h.MinTime() == math.MaxInt64 {
		return 0, false
	}

	return h.appendableMinValidTime(), true
}

func max(a, b int64) int64 {
	if a > b {
		return a
	}
	return b
}

func (h *Head) getAppendBuffer() []record.RefSample {
	b := h.appendPool.Get()
	if b == nil {
		return make([]record.RefSample, 0, 512)
	}
	return b
}

func (h *Head) putAppendBuffer(b []record.RefSample) {
	h.appendPool.Put(b[:0])
}

func (h *Head) getExemplarBuffer() []exemplarWithSeriesRef {
	b := h.exemplarsPool.Get()
	if b == nil {
		return make([]exemplarWithSeriesRef, 0, 512)
	}
	return b
}

func (h *Head) putExemplarBuffer(b []exemplarWithSeriesRef) {
	if b == nil {
		return
	}
	h.exemplarsPool.Put(b[:0])
}

func (h *Head) getHistogramBuffer() []record.RefHistogramSample {
	b := h.histogramsPool.Get()
	if b == nil {
		return make([]record.RefHistogramSample, 0, 512)
	}
	return b
}

func (h *Head) putHistogramBuffer(b []record.RefHistogramSample) {
	h.histogramsPool.Put(b[:0])
}

func (h *Head) getFloatHistogramBuffer() []record.RefFloatHistogramSample {
	b := h.floatHistogramsPool.Get()
	if b == nil {
		return make([]record.RefFloatHistogramSample, 0, 512)
	}
	return b
}

func (h *Head) putFloatHistogramBuffer(b []record.RefFloatHistogramSample) {
	h.floatHistogramsPool.Put(b[:0])
}

func (h *Head) getMetadataBuffer() []record.RefMetadata {
	b := h.metadataPool.Get()
	if b == nil {
		return make([]record.RefMetadata, 0, 512)
	}
	return b
}

func (h *Head) putMetadataBuffer(b []record.RefMetadata) {
	h.metadataPool.Put(b[:0])
}

func (h *Head) getSeriesBuffer() []*memSeries {
	b := h.seriesPool.Get()
	if b == nil {
		return make([]*memSeries, 0, 512)
	}
	return b
}

func (h *Head) putSeriesBuffer(b []*memSeries) {
	h.seriesPool.Put(b[:0])
}

func (h *Head) getBytesBuffer() []byte {
	b := h.bytesPool.Get()
	if b == nil {
		return make([]byte, 0, 1024)
	}
	return b
}

func (h *Head) putBytesBuffer(b []byte) {
	h.bytesPool.Put(b[:0])
}

type exemplarWithSeriesRef struct {
	ref      storage.SeriesRef
	exemplar exemplar.Exemplar
}

type headAppender struct {
	head          *Head
	minValidTime  int64 // No samples below this timestamp are allowed.
	mint, maxt    int64
	headMaxt      int64 // We track it here to not take the lock for every sample appended.
	oooTimeWindow int64 // Use the same for the entire append, and don't load the atomic for each sample.

	series               []record.RefSeries               // New series held by this appender.
	samples              []record.RefSample               // New float samples held by this appender.
	sampleSeries         []*memSeries                     // Float series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
	histograms           []record.RefHistogramSample      // New histogram samples held by this appender.
	histogramSeries      []*memSeries                     // Histogram series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
	floatHistograms      []record.RefFloatHistogramSample // New float histogram samples held by this appender.
	floatHistogramSeries []*memSeries                     // Float histogram series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
	metadata             []record.RefMetadata             // New metadata held by this appender.
	metadataSeries       []*memSeries                     // Series corresponding to the metadata held by this appender.
	exemplars            []exemplarWithSeriesRef          // New exemplars held by this appender.

	appendID, cleanupAppendIDsBelow uint64
	closed                          bool
}

func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
	// For OOO inserts, this restriction is irrelevant and will be checked later once we confirm the sample is an in-order append.
	// If OOO inserts are disabled, we might as well check this as early as we can and avoid more work.
	if a.oooTimeWindow == 0 && t < a.minValidTime {
		a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
		return 0, storage.ErrOutOfBounds
	}

	s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
	if s == nil {
		var err error
		s, err = a.getOrCreate(lset)
		if err != nil {
			return 0, err
		}
	}

	if value.IsStaleNaN(v) {
		switch {
		case s.lastHistogramValue != nil:
			return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v}, nil)
		case s.lastFloatHistogramValue != nil:
			return a.AppendHistogram(ref, lset, t, nil, &histogram.FloatHistogram{Sum: v})
		}
	}

	s.Lock()
	// TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
	// to skip that sample from the WAL and write only in the WBL.
	_, delta, err := s.appendable(t, v, a.headMaxt, a.minValidTime, a.oooTimeWindow)
	if err == nil {
		s.pendingCommit = true
	}
	s.Unlock()
	if delta > 0 {
		a.head.metrics.oooHistogram.Observe(float64(delta) / 1000)
	}
	if err != nil {
		switch {
		case errors.Is(err, storage.ErrOutOfOrderSample):
			a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
		case errors.Is(err, storage.ErrTooOldSample):
			a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
		}
		return 0, err
	}

	if t < a.mint {
		a.mint = t
	}
	if t > a.maxt {
		a.maxt = t
	}

	a.samples = append(a.samples, record.RefSample{
		Ref: s.ref,
		T:   t,
		V:   v,
	})
	a.sampleSeries = append(a.sampleSeries, s)
	return storage.SeriesRef(s.ref), nil
}

// AppendCTZeroSample appends a synthetic zero sample at the created timestamp (ct).
// It returns an error when the sample can't be appended. See
// storage.CreatedTimestampAppender.AppendCTZeroSample for further documentation.
func (a *headAppender) AppendCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64) (storage.SeriesRef, error) {
	if ct >= t {
		return 0, fmt.Errorf("CT is newer or the same as sample's timestamp, ignoring")
	}

	s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
	if s == nil {
		var err error
		s, err = a.getOrCreate(lset)
		if err != nil {
			return 0, err
		}
	}

	// Check whether the CT would be out of order compared to samples we might already have for this series.
	// NOTE(bwplotka): This will often be hit, as it's expected for long-living
	// counters to share the same CT.
	s.Lock()
	isOOO, _, err := s.appendable(ct, 0, a.headMaxt, a.minValidTime, a.oooTimeWindow)
	if err == nil {
		s.pendingCommit = true
	}
	s.Unlock()
	if err != nil {
		return 0, err
	}
	if isOOO {
		return storage.SeriesRef(s.ref), storage.ErrOutOfOrderCT
	}

	if ct > a.maxt {
		a.maxt = ct
	}
	a.samples = append(a.samples, record.RefSample{Ref: s.ref, T: ct, V: 0.0})
	a.sampleSeries = append(a.sampleSeries, s)
	return storage.SeriesRef(s.ref), nil
}
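
// Illustrative sketch (assumed values): a counter first scraped at t=60_000 ms
// reports a created timestamp ct=30_000 ms. Appending the synthetic zero before
// the real sample records when the counter actually started counting:
//
//	if ctApp, ok := app.(storage.CreatedTimestampAppender); ok {
//		_, _ = ctApp.AppendCTZeroSample(0, lset, 60_000, 30_000) // Stages (30_000, 0); ErrOutOfOrderCT is expected on later scrapes.
//	}
//	ref, err := app.Append(0, lset, 60_000, 10) // The real counter sample.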

func (a *headAppender) getOrCreate(lset labels.Labels) (*memSeries, error) {
	// Ensure no empty labels have gotten through.
	lset = lset.WithoutEmpty()
	if lset.IsEmpty() {
		return nil, fmt.Errorf("empty labelset: %w", ErrInvalidSample)
	}
	if l, dup := lset.HasDuplicateLabelNames(); dup {
		return nil, fmt.Errorf(`label name "%s" is not unique: %w`, l, ErrInvalidSample)
	}
	s, created, err := a.head.getOrCreate(lset.Hash(), lset)
	if err != nil {
		return nil, err
	}
	if created {
		a.series = append(a.series, record.RefSeries{
			Ref:    s.ref,
			Labels: lset,
		})
	}
	return s, nil
}

// appendable checks whether the given sample is valid for appending to the series.
// If it returns false with no error, the sample belongs in the in-order chunk;
// if it returns true with no error, the sample belongs in the out-of-order chunk.
// An error signifies the sample cannot be handled.
func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTimeWindow int64) (isOOO bool, oooDelta int64, err error) {
	// Check if we can append in the in-order chunk.
	if t >= minValidTime {
		if s.headChunks == nil {
			// The series has no sample and was freshly created.
			return false, 0, nil
		}
		msMaxt := s.maxTime()
		if t > msMaxt {
			return false, 0, nil
		}
		if t == msMaxt {
			// We are allowing exact duplicates as we can encounter them in valid cases
			// like federation and erroring out at that time would be extremely noisy.
			// This only checks against the latest in-order sample.
			// The OOO headchunk has its own method to detect these duplicates.
			if math.Float64bits(s.lastValue) != math.Float64bits(v) {
				return false, 0, storage.ErrDuplicateSampleForTimestamp
			}
			// Sample is identical (ts + value) with most current (highest ts) sample in sampleBuf.
			return false, 0, nil
		}
	}

	// The sample cannot go in the in-order chunk. Check if it can go in the out-of-order chunk.
	if oooTimeWindow > 0 && t >= headMaxt-oooTimeWindow {
		return true, headMaxt - t, nil
	}

	// The sample cannot go in either the in-order or the out-of-order chunk.
	if oooTimeWindow > 0 {
		return true, headMaxt - t, storage.ErrTooOldSample
	}
	if t < minValidTime {
		return false, headMaxt - t, storage.ErrOutOfBounds
	}
	return false, headMaxt - t, storage.ErrOutOfOrderSample
}
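
// Illustrative sketch (assumed values): with minValidTime=0, oooTimeWindow=300_000 ms,
// and both the head's and the series' max time at 1_000_000 ms, appendable returns:
//
//	t=1_050_000 -> (false, 0, nil)                          // newer than the latest sample: in-order append
//	t=900_000   -> (true, 100_000, nil)                     // older, but inside the out-of-order window
//	t=600_000   -> (true, 400_000, storage.ErrTooOldSample) // outside the out-of-order window
//
// With oooTimeWindow=0, the last two cases would instead fail with
// storage.ErrOutOfOrderSample.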

// appendableHistogram checks whether the given histogram is valid for appending to the series.
func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram) error {
	if s.headChunks == nil {
		return nil
	}
	if t > s.headChunks.maxTime {
		return nil
	}
	if t < s.headChunks.maxTime {
		return storage.ErrOutOfOrderSample
	}

	// We are allowing exact duplicates as we can encounter them in valid cases
	// like federation and erroring out at that time would be extremely noisy.
	if !h.Equals(s.lastHistogramValue) {
		return storage.ErrDuplicateSampleForTimestamp
	}
	return nil
}

// appendableFloatHistogram checks whether the given float histogram is valid for appending to the series.
func (s *memSeries) appendableFloatHistogram(t int64, fh *histogram.FloatHistogram) error {
	if s.headChunks == nil {
		return nil
	}
	if t > s.headChunks.maxTime {
		return nil
	}
	if t < s.headChunks.maxTime {
		return storage.ErrOutOfOrderSample
	}

	// We are allowing exact duplicates as we can encounter them in valid cases
	// like federation and erroring out at that time would be extremely noisy.
	if !fh.Equals(s.lastFloatHistogramValue) {
		return storage.ErrDuplicateSampleForTimestamp
	}
	return nil
}

// AppendExemplar for headAppender assumes the series ref already exists, and so it doesn't
// use getOrCreate or make any of the lset validity checks that Append does.
func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
	// Check if exemplar storage is enabled.
	if !a.head.opts.EnableExemplarStorage || a.head.opts.MaxExemplars.Load() <= 0 {
		return 0, nil
	}

	// Get Series
	s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
	if s == nil {
		s = a.head.series.getByHash(lset.Hash(), lset)
		if s != nil {
			ref = storage.SeriesRef(s.ref)
		}
	}

	if s == nil {
		return 0, fmt.Errorf("unknown HeadSeriesRef when trying to add exemplar: %d", ref)
	}

	// Ensure no empty labels have gotten through.
	e.Labels = e.Labels.WithoutEmpty()

	err := a.head.exemplars.ValidateExemplar(s.lset, e)
	if err != nil {
		if errors.Is(err, storage.ErrDuplicateExemplar) || errors.Is(err, storage.ErrExemplarsDisabled) {
			// Duplicate, don't return an error but don't accept the exemplar.
			return 0, nil
		}
		return 0, err
	}

	a.exemplars = append(a.exemplars, exemplarWithSeriesRef{ref, e})

	return storage.SeriesRef(s.ref), nil
}

func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
	if !a.head.opts.EnableNativeHistograms.Load() {
		return 0, storage.ErrNativeHistogramsDisabled
	}

	if t < a.minValidTime {
		a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
		return 0, storage.ErrOutOfBounds
	}

	if h != nil {
		if err := h.Validate(); err != nil {
			return 0, err
		}
	}

	if fh != nil {
		if err := fh.Validate(); err != nil {
			return 0, err
		}
	}

	s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
	if s == nil {
		// Ensure no empty labels have gotten through.
		lset = lset.WithoutEmpty()
		if lset.IsEmpty() {
			return 0, fmt.Errorf("empty labelset: %w", ErrInvalidSample)
		}
		if l, dup := lset.HasDuplicateLabelNames(); dup {
			return 0, fmt.Errorf(`label name "%s" is not unique: %w`, l, ErrInvalidSample)
		}

		var created bool
		var err error
		s, created, err = a.head.getOrCreate(lset.Hash(), lset)
		if err != nil {
			return 0, err
		}
		if created {
			switch {
			case h != nil:
				s.lastHistogramValue = &histogram.Histogram{}
			case fh != nil:
				s.lastFloatHistogramValue = &histogram.FloatHistogram{}
			}
			a.series = append(a.series, record.RefSeries{
				Ref:    s.ref,
				Labels: lset,
			})
		}
	}

	switch {
	case h != nil:
		s.Lock()
		if err := s.appendableHistogram(t, h); err != nil {
			s.Unlock()
			if errors.Is(err, storage.ErrOutOfOrderSample) {
				a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
			}
			return 0, err
		}
		s.pendingCommit = true
		s.Unlock()
		a.histograms = append(a.histograms, record.RefHistogramSample{
			Ref: s.ref,
			T:   t,
			H:   h,
		})
		a.histogramSeries = append(a.histogramSeries, s)
	case fh != nil:
		s.Lock()
		if err := s.appendableFloatHistogram(t, fh); err != nil {
			s.Unlock()
			if errors.Is(err, storage.ErrOutOfOrderSample) {
				a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
			}
			return 0, err
		}
		s.pendingCommit = true
		s.Unlock()
		a.floatHistograms = append(a.floatHistograms, record.RefFloatHistogramSample{
			Ref: s.ref,
			T:   t,
			FH:  fh,
		})
		a.floatHistogramSeries = append(a.floatHistogramSeries, s)
	}

	if t < a.mint {
		a.mint = t
	}
	if t > a.maxt {
		a.maxt = t
	}

	return storage.SeriesRef(s.ref), nil
}
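
// Usage sketch (illustrative only): callers are expected to pass either an
// integer histogram h or a float histogram fh, not both, e.g.
//
//	_, err := app.AppendHistogram(0, lset, ts, &histogram.Histogram{Count: 1, Sum: 2.5}, nil)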

// UpdateMetadata for headAppender assumes the series ref already exists, and so it doesn't
// use getOrCreate or make any of the lset sanity checks that Append does.
func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels, meta metadata.Metadata) (storage.SeriesRef, error) {
	s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
	if s == nil {
		s = a.head.series.getByHash(lset.Hash(), lset)
		if s != nil {
			ref = storage.SeriesRef(s.ref)
		}
	}
	if s == nil {
		return 0, fmt.Errorf("unknown series when trying to add metadata with HeadSeriesRef: %d and labels: %s", ref, lset)
	}
Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
* fix(discovery): allow requireUpdate util to timeout in discovery/file/file_test.go.
The loop ran indefinitely if the condition wasn't met.
Before, each iteration created a new timer channel which was always outpaced by
the other timer channel with a smaller duration.
Minor detail: there was a memory leak, as the resources of the ~10 previous timers
were constantly retained. With the fix, we may keep the resources of one timer around
for defaultWait, but that isn't worth the extra changes to make it right.
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
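A minimal sketch of the fixed wait shape, assuming a channel-based update signal; the names requireUpdate and defaultWait come from the description above, while everything else is illustrative rather than the actual test code:
package main

import (
	"fmt"
	"time"
)

// requireUpdate waits for a signal on updated, but gives up after defaultWait.
// The timeout channel is created once, outside the loop, so it can actually
// fire; the short per-iteration timer no longer masks it.
func requireUpdate(updated <-chan struct{}, defaultWait time.Duration) error {
	timeout := time.After(defaultWait)
	for {
		select {
		case <-updated:
			return nil
		case <-timeout:
			return fmt.Errorf("timed out after %s waiting for update", defaultWait)
		case <-time.After(defaultWait / 10):
			// Retry/poll tick; previously a fresh timer like this outpaced the
			// overall timeout on every iteration, so the loop never gave up.
		}
	}
}

func main() {
	fmt.Println(requireUpdate(make(chan struct{}), 100*time.Millisecond))
}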
* Merge pull request #13371 from kevinmingtarja/fix-isHeatmapData
ui: fix handling of scalar and string in isHeatmapData
* tsdb/{index,compact}: allow using custom postings encoding format (#13242)
* tsdb/{index,compact}: allow using custom postings encoding format
We would like to experiment with a different postings encoding format in
Thanos so in this change I am proposing adding another argument to
`NewWriter` which would allow users to change the format if needed.
Also, wire the leveled compactor so that it would be possible to change
the format there too.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb/compact: use a struct for leveled compactor options
As discussed on Slack, let's use a struct for the options in leveled
compactor.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb: make changes after Bryan's review
- Make changes less intrusive
- Turn the postings encoder type into a function
- Add NewWriterWithEncoder()
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
---------
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Cut 2.49.0-rc.2
Signed-off-by: bwplotka <bwplotka@gmail.com>
* build(deps): bump actions/setup-go from 3.5.0 to 5.0.0 in /scripts (#13362)
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 3.5.0 to 5.0.0.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](https://github.com/actions/setup-go/compare/6edd4406fa81c3da01a34fa6f6343087c207a568...0c52d547c9bc32b1aa3301fd7a9cb496313a4491)
---
updated-dependencies:
- dependency-name: actions/setup-go
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump github/codeql-action from 2.22.8 to 3.22.12 (#13358)
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 2.22.8 to 3.22.12.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/github/codeql-action/compare/407ffafae6a767df3e0230c3df91b6443ae8df75...012739e5082ff0c22ca6d6ab32e07c36df03c4a4)
---
updated-dependencies:
- dependency-name: github/codeql-action
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* put @nexucis as a release shepherd (#13383)
Signed-off-by: Augustin Husson <augustin.husson@amadeus.com>
* Add analyze histograms command to promtool (#12331)
Add `query analyze` command to promtool
This command analyzes the buckets of classic and native histograms,
based on data queried from the Prometheus query API, i.e. it
doesn't require direct access to the TSDB files.
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* included instance in all necessary descriptions
Signed-off-by: Erik Sommer <ersotech@posteo.de>
* tsdb/compact: fix passing merge func
Fixing a very small logical problem I've introduced :(.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb: add enable overlapping compaction
This functionality is needed in downstream projects because they have a
separate component that does compaction.
Upstreaming
https://github.com/grafana/mimir-prometheus/blob/7c8e9a2a76fc729e9078889782928b2fdfe240e9/tsdb/compact.go#L323-L325.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Cut 2.49.0
Signed-off-by: bwplotka <bwplotka@gmail.com>
* promtool: allow setting multiple matchers to "promtool tsdb dump" command. (#13296)
Conditions are ANDed inside the same matcher, but matchers are ORed.
Includes unit tests for "promtool tsdb dump".
Refactors some matcher scraping utils.
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Fixed changelog
Signed-off-by: bwplotka <bwplotka@gmail.com>
* tsdb/main: wire "EnableOverlappingCompaction" to tsdb.Options (#13398)
This added the https://github.com/prometheus/prometheus/pull/13393
"EnableOverlappingCompaction" parameter to the compactor code but not to
the tsdb.Options. I forgot about that. Add it to `tsdb.Options` too and
set it to `true` in Prometheus.
Copy/paste the description from
https://github.com/prometheus/prometheus/pull/13393#issuecomment-1891787986
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Issue #13268: fix quality value in accept header
Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
* Cut 2.49.1 with scrape q= bugfix.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Cut 2.49.1 web package.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Restore more efficient version of NewPossibleNonCounterInfo annotation (#13022)
Restore more efficient version of NewPossibleNonCounterInfo annotation
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* Fix regressions introduced by #13242
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* fix slice copy in 1.20 (#13389)
The slices package is added to the standard library in Go 1.21;
we need to import from the exp area to maintain compatibility with Go 1.20.
Signed-off-by: tyltr <tylitianrui@126.com>
* Docs: Query Basics: link to rate (#10538)
Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu>
* chore(kubernetes): check preconditions earlier and avoid unnecessary checks or iterations
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Examples: link to `rate` for new users (#10535)
* Examples: link to `rate` for new users
Signed-off-by: Ted Robertson 10043369+tredondo@users.noreply.github.com
Co-authored-by: Bryan Boreham <bjboreham@gmail.com>
* promql: use natural sort in sort_by_label and sort_by_label_desc (#13411)
These functions are intended for humans, as robots can already sort the results
however they please. Humans like things sorted "naturally":
* https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/
A similar thing has been done to Grafana, which is also used by humans:
* https://github.com/grafana/grafana/pull/78024
* https://github.com/grafana/grafana/pull/78494
Signed-off-by: Ivan Babrou <github@ivan.computer>
* TestLabelValuesWithMatchers: Add test case
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* remove obsolete build tag
Signed-off-by: tyltr <tylitianrui@126.com>
* Upgrade some golang dependencies for resty 2.11
Signed-off-by: Israel Blancas <iblancasa@gmail.com>
* Native Histograms: support `native_histogram_min_bucket_factor` in scrape_config (#13222)
Native Histograms: support native_histogram_min_bucket_factor in scrape_config
---------
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
* Add warnings for histogramRate applied with isCounter not matching counter/gauge histogram (#13392)
Add warnings for histogramRate applied with isCounter not matching counter/gauge histogram
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* Minor fixes to otlp vendor update script
Signed-off-by: Goutham <gouthamve@gmail.com>
* build(deps): bump github.com/hetznercloud/hcloud-go/v2
Bumps [github.com/hetznercloud/hcloud-go/v2](https://github.com/hetznercloud/hcloud-go) from 2.4.0 to 2.6.0.
- [Release notes](https://github.com/hetznercloud/hcloud-go/releases)
- [Changelog](https://github.com/hetznercloud/hcloud-go/blob/main/CHANGELOG.md)
- [Commits](https://github.com/hetznercloud/hcloud-go/compare/v2.4.0...v2.6.0)
---
updated-dependencies:
- dependency-name: github.com/hetznercloud/hcloud-go/v2
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] <support@github.com>
* Enhanced visibility for `promtool test rules` with JSON colored formatting (#13342)
* Added diff flag for unit test to improve readability & debugging
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Removed blank spaces
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Fixed linting error
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Added cli flags to documentation
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Revert unrelated linting fixes
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Fixed review suggestions
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Cleanup
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Updated flag description
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Updated flag description
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
---------
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* storage: skip merging when no remote storage configured
Prometheus is hard-coded to use a fanout storage between TSDB and
a remote storage which by default is empty.
This change detects the empty storage and skips merging between
result sets, which would make `Select()` sort results.
Bottom line: we skip a sort unless there really is some remote storage
configured.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
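A rough sketch of the shortcut with simplified, assumed types (not the real storage.Querier or SeriesSet interfaces): when no secondary (remote) querier is configured, return the primary's result directly and skip the merge that would otherwise force sorting:
package main

import (
	"fmt"
	"sort"
)

// A series set here is just a list of series names; the real interfaces are richer.
type seriesSet []string

type querier func(sortSeries bool) seriesSet

// fanoutSelect merges the primary with any secondaries. With no secondaries,
// it returns the primary's result as-is and skips the merge (and its sort).
func fanoutSelect(primary querier, secondaries []querier, sortSeries bool) seriesSet {
	if len(secondaries) == 0 {
		return primary(sortSeries)
	}
	merged := primary(true)
	for _, q := range secondaries {
		merged = append(merged, q(true)...)
	}
	sort.Strings(merged) // stand-in for a proper k-way merge of sorted sets
	return merged
}

func main() {
	local := querier(func(bool) seriesSet { return seriesSet{"up", "go_goroutines"} })
	fmt.Println(fanoutSelect(local, nil, false)) // no remote storage: no merge, no sort
}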
* Remove csmarchbanks from remote write owners (#13432)
I have not had the time to keep up with remote write and have no plans
to work on it in the near future so I am withdrawing my maintainership
of that part of the codebase. I continue to focus on client_python.
Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
* add more context cancellation check at evaluation time
Signed-off-by: Ben Ye <benye@amazon.com>
* Optimize label values with matchers by taking shortcuts (#13426)
Don't calculate postings beforehand: we may not need them. If all
matchers are for the requested label, we can just filter its values.
Also, if there are no values at all, no need to run any kind of
logic.
Also add more labelValuesWithMatchers benchmarks
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
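A simplified sketch of the two shortcuts, using assumed types rather than the real index API: bail out when the label has no values at all, and filter the value list directly when every matcher targets the requested label:
package main

import (
	"fmt"
	"strings"
)

type matcher struct {
	name   string
	accept func(value string) bool
}

// labelValuesWithMatchers returns the values of label name that satisfy ms,
// and reports whether the shortcut applied (otherwise postings would be needed).
func labelValuesWithMatchers(name string, values []string, ms []matcher) ([]string, bool) {
	if len(values) == 0 {
		return nil, true // no values at all: nothing to compute
	}
	for _, m := range ms {
		if m.name != name {
			return nil, false // matcher on another label: fall back to postings
		}
	}
	var out []string
	for _, v := range values {
		ok := true
		for _, m := range ms {
			if !m.accept(v) {
				ok = false
				break
			}
		}
		if ok {
			out = append(out, v)
		}
	}
	return out, true
}

func main() {
	ms := []matcher{{name: "job", accept: func(v string) bool { return strings.HasPrefix(v, "api") }}}
	values, ok := labelValuesWithMatchers("job", []string{"api", "api-eu", "web"}, ms)
	fmt.Println(values, ok) // [api api-eu] true
}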
* Add automatic memory limit handling
Enable automatic detection of memory limits and configure GOMEMLIMIT to
match.
* Also includes a flag to allow controlling the reserved ratio.
Signed-off-by: SuperQ <superq@gmail.com>
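The runtime side of this comes down to runtime/debug.SetMemoryLimit (the same knob as the GOMEMLIMIT environment variable); the sketch below assumes the container limit has already been detected and only shows applying a reserved ratio, so neither the detection code nor any flag name is taken from the change itself:
package main

import (
	"fmt"
	"runtime/debug"
)

// applyMemLimit hands ratio*detectedBytes to the Go runtime as a soft memory
// limit, the same mechanism the GOMEMLIMIT environment variable controls.
func applyMemLimit(detectedBytes int64, ratio float64) int64 {
	limit := int64(float64(detectedBytes) * ratio)
	debug.SetMemoryLimit(limit)
	return limit
}

func main() {
	// e.g. a 2 GiB container limit with 10% reserved for non-heap memory.
	fmt.Println("memory limit set to", applyMemLimit(2<<30, 0.9), "bytes")
}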
* Update OSSF badge link (#13433)
Provide a more user friendly interface
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
* SD Managers taking over responsibility for registration of debug metrics (#13375)
SD Managers take over responsibility for SD metrics registration
---------
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
* Optimize histogram iterators (#13340)
Optimize histogram iterators
Histogram iterators allocate new objects in the AtHistogram and
AtFloatHistogram methods, which makes calculating rates over long
ranges expensive.
In #13215 we allowed an existing object to be reused
when converting an integer histogram to a float histogram. This commit follows
the same idea and allows injecting an existing object in the AtHistogram and
AtFloatHistogram methods. When the injected value is nil, iterators allocate
new histograms, otherwise they populate and return the injected object.
The commit also adds a CopyTo method to Histogram and FloatHistogram which
is used in the BufferedIterator to overwrite items in the ring instead of making
new copies.
Note that a specialized HPoint pool is needed for all of this to work
(`matrixSelectorHPool`).
---------
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
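A stripped-down sketch of the reuse pattern with a hypothetical Histogram type instead of the real chunkenc and histogram packages: callers pass nil to get a freshly allocated histogram, or pass an existing one to have it overwritten in place:
package main

import "fmt"

type Histogram struct {
	Count   uint64
	Sum     float64
	Buckets []int64
}

type iterator struct{ cur Histogram }

// AtHistogram copies the current value into h when h is non-nil, avoiding an
// allocation per call; with h == nil it falls back to allocating a new one.
func (it *iterator) AtHistogram(h *Histogram) *Histogram {
	if h == nil {
		h = &Histogram{}
	}
	h.Count = it.cur.Count
	h.Sum = it.cur.Sum
	h.Buckets = append(h.Buckets[:0], it.cur.Buckets...) // reuse the backing array
	return h
}

func main() {
	it := &iterator{cur: Histogram{Count: 3, Sum: 1.5, Buckets: []int64{1, 2}}}
	var reuse Histogram
	for i := 0; i < 3; i++ {
		h := it.AtHistogram(&reuse) // one Histogram reused across the whole loop
		fmt.Println(h.Count, h.Sum, h.Buckets)
	}
}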
* doc: Mark `mad_over_time` as experimental (#13440)
We forgot to do that in
https://github.com/prometheus/prometheus/pull/13059
Signed-off-by: beorn7 <beorn@grafana.com>
* Change metric label for Puppetdb from 'http' to 'puppetdb'
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
* mirror metrics.proto change & generate code
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
* TestHeadLabelValuesWithMatchers: Add test case (#13414)
Add test case to TestHeadLabelValuesWithMatchers, while fixing a couple
of typos in other test cases. Also enclosing some implicit sub-tests in a
`t.Run` call to make them explicitly sub-tests.
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* update all go dependencies (#13438)
Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
* build(deps): bump the k8s-io group with 2 updates (#13454)
Bumps the k8s-io group with 2 updates: [k8s.io/api](https://github.com/kubernetes/api) and [k8s.io/client-go](https://github.com/kubernetes/client-go).
Updates `k8s.io/api` from 0.28.4 to 0.29.1
- [Commits](https://github.com/kubernetes/api/compare/v0.28.4...v0.29.1)
Updates `k8s.io/client-go` from 0.28.4 to 0.29.1
- [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md)
- [Commits](https://github.com/kubernetes/client-go/compare/v0.28.4...v0.29.1)
---
updated-dependencies:
- dependency-name: k8s.io/api
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: k8s-io
- dependency-name: k8s.io/client-go
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: k8s-io
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump the go-opentelemetry-io group with 1 update (#13453)
Bumps the go-opentelemetry-io group with 1 update: [go.opentelemetry.io/collector/semconv](https://github.com/open-telemetry/opentelemetry-collector).
Updates `go.opentelemetry.io/collector/semconv` from 0.92.0 to 0.93.0
- [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases)
- [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md)
- [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.92.0...v0.93.0)
---
updated-dependencies:
- dependency-name: go.opentelemetry.io/collector/semconv
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: go-opentelemetry-io
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump actions/upload-artifact from 3.1.3 to 4.0.0 (#13355)
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 3.1.3 to 4.0.0.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/a8a3f3ad30e3422c9c7b888a15615d19a852ae32...c7d193f32edcb7bfad88892161225aeda64e9392)
---
updated-dependencies:
- dependency-name: actions/upload-artifact
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump bufbuild/buf-push-action (#13357)
Bumps [bufbuild/buf-push-action](https://github.com/bufbuild/buf-push-action) from 342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1 to a654ff18effe4641ebea4a4ce242c49800728459.
- [Release notes](https://github.com/bufbuild/buf-push-action/releases)
- [Commits](https://github.com/bufbuild/buf-push-action/compare/342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1...a654ff18effe4641ebea4a4ce242c49800728459)
---
updated-dependencies:
- dependency-name: bufbuild/buf-push-action
dependency-type: direct:production
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* Labels: Add DropMetricName function, used in PromQL (#13446)
This function is called very frequently when executing PromQL functions,
and we can do it much more efficiently inside Labels.
In the common case that `__name__` comes first in the labels, we simply
re-point to start at the next label, which is nearly free.
`DropMetricName` is now so cheap I removed the cache - benchmarks show
everything still goes faster.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
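A hedged sketch of the fast path using a plain slice of name/value pairs rather than Prometheus' internal Labels representation: when __name__ sorts first, dropping it is just re-slicing past element zero:
package main

import "fmt"

type Label struct{ Name, Value string }

// dropMetricName assumes ls is sorted by label name, as Prometheus labels are,
// so __name__ usually sits at index 0 and dropping it is a cheap re-slice.
func dropMetricName(ls []Label) []Label {
	if len(ls) > 0 && ls[0].Name == "__name__" {
		return ls[1:] // nearly free: no copy, only a new slice header
	}
	out := make([]Label, 0, len(ls)) // rare path: copy everything else
	for _, l := range ls {
		if l.Name != "__name__" {
			out = append(out, l)
		}
	}
	return out
}

func main() {
	ls := []Label{{Name: "__name__", Value: "http_requests_total"}, {Name: "job", Value: "api"}}
	fmt.Println(dropMetricName(ls)) // [{job api}]
}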
* tsdb: simplify internal series delete function (#13261)
Lifting an optimisation from Agent code, `seriesHashmap.del` can use
the unique series reference, doesn't need to check Labels.
Also streamline the logic for deleting from `unique` and `conflicts` maps,
and add some comments to help the next person.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* otlptranslator/update-copy.sh: Fix sed command lines
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* Rollback k8s.io requirements (#13462)
Rollback k8s.io Go modules to v0.28.6 to avoid forcing upgrade of Go to
1.21. This allows us to keep compatibility with the currently supported
upstream Go releases.
Signed-off-by: SuperQ <superq@gmail.com>
* Make update-copy.sh work for both OSX and GNU sed
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* Name @beorn7 and @krajorama as maintainers for native histograms
I have been the de-facto maintainer for native histograms from the
beginning. So let's put this into MAINTAINERS.md.
In addition, I hereby propose George Krajcsovits AKA Krajo as a
co-maintainer. He has contributed a lot of native histogram code, but
more importantly, he has contributed substantially to reviewing other
contributors' native histogram code, up to a point where I was merely
rubberstamping the PRs he had already reviewed. I'm confident that he
is ready to be granted commit rights as outlined in the
"Maintainers" section of the governance:
https://prometheus.io/governance/#maintainers
According to the same section of the governance, I will announce the
proposed change on the developers mailing list and will give some time
for lazy consensus before merging this PR.
Signed-off-by: beorn7 <beorn@grafana.com>
* ui/fix: correct url handling for stacked graphs (#13460)
Signed-off-by: Yury Moladau <yurymolodov@gmail.com>
* tsdb: use cheaper Mutex on series
Mutex is 8 bytes; RWMutex is 24 bytes and much more complicated. Since
`RLock` is only used in two places, `UpdateMetadata` and `Delete`,
neither of which are hotspots, we should use the cheaper one.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
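The size claim is easy to check; on 64-bit platforms the current standard-library types measure as follows:
package main

import (
	"fmt"
	"sync"
	"unsafe"
)

func main() {
	fmt.Println("sync.Mutex:  ", unsafe.Sizeof(sync.Mutex{}), "bytes")   // 8 on 64-bit
	fmt.Println("sync.RWMutex:", unsafe.Sizeof(sync.RWMutex{}), "bytes") // 24 on 64-bit
}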
* Fix last_over_time for native histograms
The last_over_time retains a histogram sample without making a copy.
This sample is now coming from the buffered iterator used for windowing functions,
and can be reused for reading subsequent samples as the iterator progresses.
I would propose copying the sample in the last_over_time function, similar to
how it is done for rate, sum_over_time and others.
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
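The shape of the proposed fix, sketched with a hypothetical FloatHistogram that only carries a Copy method: retain a private copy of the sample, because the buffered iterator will reuse its underlying object:
package main

import "fmt"

type FloatHistogram struct{ Sum float64 }

func (h *FloatHistogram) Copy() *FloatHistogram { c := *h; return &c }

func main() {
	buffered := &FloatHistogram{Sum: 1} // object owned and reused by the iterator
	last := buffered.Copy()             // last_over_time keeps its own copy
	buffered.Sum = 42                   // the iterator moves on and overwrites its buffer
	fmt.Println(last.Sum)               // still 1, unaffected by the reuse
}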
* Implementation
NOTE:
Rebased from main after refactor in #13014
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Add feature flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactor concurrency control
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Optimising dependencies/dependents funcs to not produce new slices each request
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactoring
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Rename flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactoring for performance, and to allow controller to be overridden
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Block until all rules, both sync & async, have completed evaluating
Updated & added tests
Review feedback nits
Return empty map if not indeterminate
Use highWatermark to track inflight requests counter
Appease the linter
Clarify feature flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Fix typo in CLI flag description
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Fixed auto-generated doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Improve doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Simplify the design to update concurrency controller once the rule evaluation has done
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Add more test cases to TestDependenciesEdgeCases
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Added more test cases to TestDependenciesEdgeCases
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Improved RuleConcurrencyController interface doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Introduced sequentialRuleEvalController
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Remove superfluous nil check in Group.metrics
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* api: Serialize discovered and target labels into JSON directly (#13469)
Converted maps into labels.Labels to avoid a lot of copying of data which leads to very high memory consumption while opening the /service-discovery endpoint in the Prometheus UI
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
* api: Serialize discovered labels into JSON directly in dropped targets (#13484)
Converted maps into labels.Labels to avoid a lot of copying of data which leads to very high memory consumption while opening the /service-discovery endpoint in the Prometheus UI
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
* Add ShardedPostings() support to TSDB (#10421)
This PR is a reference implementation of the proposal described in #10420.
In addition to what is described in #10420, in this PR I've introduced labels.StableHash(). The idea is to offer a hashing function which doesn't change over time, and that is used by query sharding in order to get stable behaviour over time. The implementation of labels.StableHash() is the hashing function used by Prometheus before stringlabels, which is what Grafana Mimir uses for query sharding (because it was built before stringlabels was a thing).
Follow-up work
As mentioned in #10420, if this PR is accepted I'm also open to uploading another fundamental piece used by Grafana Mimir query sharding to accelerate query execution: an optional, configurable and fast in-memory cache for the series hashes.
Signed-off-by: Marco Pracucci <marco@pracucci.com>
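An illustrative sketch of how a stable label hash drives query sharding; the FNV-1a helper below is an assumption standing in for labels.StableHash(), not its real definition:
package main

import (
	"fmt"
	"hash/fnv"
)

type Label struct{ Name, Value string }

// stableHash must never change across releases, otherwise previously computed
// shard assignments would become invalid.
func stableHash(ls []Label) uint64 {
	h := fnv.New64a()
	sep := []byte{0xff} // separator so ("a","bc") and ("ab","c") hash differently
	for _, l := range ls {
		h.Write([]byte(l.Name))
		h.Write(sep)
		h.Write([]byte(l.Value))
		h.Write(sep)
	}
	return h.Sum64()
}

func main() {
	const shardCount = 16
	series := []Label{{Name: "__name__", Value: "up"}, {Name: "job", Value: "api"}}
	fmt.Printf("series maps to shard %d of %d\n", stableHash(series)%shardCount, shardCount)
}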
* storage/remote: document why two benchmarks are skipped
One was silently doing nothing; one was doing something but the work
didn't go up linearly with iteration count.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* Pod status changes not discovered by Kube Endpoints SD (#13337)
* fix(discovery/kubernetes/endpoints): react to changes on Pods because some modifications can occur on them without triggering an update on the related Endpoints (The Pod phase changing from Pending to Running e.g.).
---------
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com>
* Small improvements, add const, remove copypasta (#8106)
Signed-off-by: Mikhail Fesenko <proggga@gmail.com>
Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com>
* Proposal to improve FPointSlice and HPointSlice allocation. (#13448)
* Reusing points slice from previous series when the slice is under utilized
* Adding comments on the bench test
Signed-off-by: Alan Protasio <alanprot@gmail.com>
* lint
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* go mod tidy
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
---------
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
Signed-off-by: Erik Sommer <ersotech@posteo.de>
Signed-off-by: Linas Medziunas <linas.medziunas@gmail.com>
Signed-off-by: bwplotka <bwplotka@gmail.com>
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
Signed-off-by: Daniel Nicholls <daniel.nicholls@resdiary.com>
Signed-off-by: Daniel Kerbel <nmdanny@gmail.com>
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Signed-off-by: Augustin Husson <augustin.husson@amadeus.com>
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Signed-off-by: Marco Pracucci <marco@pracucci.com>
Signed-off-by: tyltr <tylitianrui@126.com>
Signed-off-by: Ted Robertson 10043369+tredondo@users.noreply.github.com
Signed-off-by: Ivan Babrou <github@ivan.computer>
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
Signed-off-by: Israel Blancas <iblancasa@gmail.com>
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Signed-off-by: Goutham <gouthamve@gmail.com>
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
Signed-off-by: Ben Ye <benye@amazon.com>
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
Signed-off-by: SuperQ <superq@gmail.com>
Signed-off-by: Ben Kochie <superq@gmail.com>
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
Signed-off-by: beorn7 <beorn@grafana.com>
Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
Signed-off-by: Yury Moladau <yurymolodov@gmail.com>
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Signed-off-by: Mikhail Fesenko <proggga@gmail.com>
Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com>
Signed-off-by: Alan Protasio <alanprot@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
Co-authored-by: Julian Wiedmann <jwi@linux.ibm.com>
Co-authored-by: Bryan Boreham <bjboreham@gmail.com>
Co-authored-by: Erik Sommer <ersotech@posteo.de>
Co-authored-by: Linas Medziunas <linas.medziunas@gmail.com>
Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
Co-authored-by: Arianna Vespri <arianna.vespri@yahoo.it>
Co-authored-by: machine424 <ayoubmrini424@gmail.com>
Co-authored-by: daniel-resdiary <109083091+daniel-resdiary@users.noreply.github.com>
Co-authored-by: Daniel Kerbel <nmdanny@gmail.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Jan Fajerski <jfajersk@redhat.com>
Co-authored-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Co-authored-by: Marc Tudurí <marctc@protonmail.com>
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Co-authored-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Co-authored-by: Augustin Husson <husson.augustin@gmail.com>
Co-authored-by: Björn Rabenstein <beorn@grafana.com>
Co-authored-by: zenador <zenador@users.noreply.github.com>
Co-authored-by: gotjosh <josue.abreu@gmail.com>
Co-authored-by: Ben Kochie <superq@gmail.com>
Co-authored-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Co-authored-by: Marco Pracucci <marco@pracucci.com>
Co-authored-by: tyltr <tylitianrui@126.com>
Co-authored-by: Ted Robertson <10043369+tredondo@users.noreply.github.com>
Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu>
Co-authored-by: Matthias Loibl <mail@matthiasloibl.com>
Co-authored-by: Ivan Babrou <github@ivan.computer>
Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com>
Co-authored-by: Israel Blancas <iblancasa@gmail.com>
Co-authored-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: Goutham <gouthamve@gmail.com>
Co-authored-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Co-authored-by: Chris Marchbanks <csmarchbanks@gmail.com>
Co-authored-by: Ben Ye <benye@amazon.com>
Co-authored-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Co-authored-by: Paulin Todev <paulin.todev@gmail.com>
Co-authored-by: Filip Petkovski <filip.petkovsky@gmail.com>
Co-authored-by: Yury Molodov <yurymolodov@gmail.com>
Co-authored-by: Danny Kopping <danny.kopping@grafana.com>
Co-authored-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com>
Co-authored-by: Mikhail Fesenko <proggga@gmail.com>
Co-authored-by: Alan Protasio <alanprot@gmail.com>
2024-02-02 10:38:50 -08:00
s.Lock()
2022-08-17 03:02:28 -07:00
hasNewMetadata := s.meta == nil || *s.meta != meta
remote write 2.0: sync with `main` branch (#13510)
* consoles: exclude iowait and steal from CPU Utilisation
'iowait' and 'steal' indicate specific idle/wait states, which shouldn't
be counted into CPU Utilisation. Also see
https://github.com/prometheus-operator/kube-prometheus/pull/796 and
https://github.com/kubernetes-monitoring/kubernetes-mixin/pull/667.
Per the iostat man page:
%idle
Show the percentage of time that the CPU or CPUs were idle and the
system did not have an outstanding disk I/O request.
%iowait
Show the percentage of time that the CPU or CPUs were idle during
which the system had an outstanding disk I/O request.
%steal
Show the percentage of time spent in involuntary wait by the
virtual CPU or CPUs while the hypervisor was servicing another
virtual processor.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
* tsdb: shrink txRing with smaller integers
4 billion active transactions ought to be enough for anyone.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* tsdb: create isolation transaction slice on demand
When Prometheus restarts it creates every series read in from the WAL,
but many of those series will be finished, and never receive any more
samples. By defering allocation of the txRing slice to when it is first
needed, we save 32 bytes per stale series.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* add cluster variable to Overview dashboard
Signed-off-by: Erik Sommer <ersotech@posteo.de>
* promql: simplify Native Histogram arithmetics
Signed-off-by: Linas Medziunas <linas.medziunas@gmail.com>
* Cut 2.49.0-rc.0 (#13270)
* Cut 2.49.0-rc.0
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Removed the duplicate.
Signed-off-by: bwplotka <bwplotka@gmail.com>
---------
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Add unit protobuf parser
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Go on adding protobuf parsing for unit
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* ui: create a reproduction for https://github.com/prometheus/prometheus/issues/13292
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Get conditional right
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Get VM Scale Set NIC (#13283)
Calling `*armnetwork.InterfacesClient.Get()` doesn't work for Scale Set
VM NIC, because these use a different Resource ID format.
Use `*armnetwork.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface()`
instead. This needs both the scale set name and the instance ID, so
add an `InstanceID` field to the `virtualMachine` struct. `InstanceID`
is empty for a VM that isn't a ScaleSetVM.
Signed-off-by: Daniel Nicholls <daniel.nicholls@resdiary.com>
* Cut v2.49.0-rc.1
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Delete debugging lines, amend error message for unit
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Correct order in error message
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* Consider storage.ErrTooOldSample as non-retryable
Signed-off-by: Daniel Kerbel <nmdanny@gmail.com>
* scrape_test.go: Increase scrape interval in TestScrapeLoopCache to reduce potential flakiness
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Avoid creating string for suffix, consider counters without _total suffix
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
* build(deps): bump github.com/prometheus/client_golang
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.17.0 to 1.18.0.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.17.0...v1.18.0)
---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] <support@github.com>
* build(deps): bump actions/setup-node from 3.8.1 to 4.0.1
Bumps [actions/setup-node](https://github.com/actions/setup-node) from 3.8.1 to 4.0.1.
- [Release notes](https://github.com/actions/setup-node/releases)
- [Commits](https://github.com/actions/setup-node/compare/5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d...b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8)
---
updated-dependencies:
- dependency-name: actions/setup-node
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
* scripts: sort file list in embed directive
Otherwise the resulting string depends on find, which afaict depends on
the underlying filesystem. A stable file list make it easier to detect
UI changes in downstreams that need to track UI assets.
Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
* Fix DataTableProps['data'] for resultType string
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
* Fix handling of scalar and string in isHeatmapData
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
* build(deps): bump github.com/influxdata/influxdb
Bumps [github.com/influxdata/influxdb](https://github.com/influxdata/influxdb) from 1.11.2 to 1.11.4.
- [Release notes](https://github.com/influxdata/influxdb/releases)
- [Commits](https://github.com/influxdata/influxdb/compare/v1.11.2...v1.11.4)
---
updated-dependencies:
- dependency-name: github.com/influxdata/influxdb
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] <support@github.com>
* build(deps): bump github.com/prometheus/prometheus
Bumps [github.com/prometheus/prometheus](https://github.com/prometheus/prometheus) from 0.48.0 to 0.48.1.
- [Release notes](https://github.com/prometheus/prometheus/releases)
- [Changelog](https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/prometheus/compare/v0.48.0...v0.48.1)
---
updated-dependencies:
- dependency-name: github.com/prometheus/prometheus
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] <support@github.com>
* Bump client_golang to v1.18.0 (#13373)
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Drop old inmemory samples (#13002)
* Drop old inmemory samples
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Avoid copying timeseries when the feature is disabled
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Run gofmt
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Clarify docs
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Add more logging info
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Remove loggers
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* optimize function and add tests
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Simplify filter
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* rename var
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Update help info from metrics
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* use metrics to keep track of drop elements during buildWriteRequest
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* rename var in tests
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* pass time.Now as parameter
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Change buildwriterequest during retries
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Revert "Remove loggers"
This reverts commit 54f91dfcae20488944162335ab4ad8be459df1ab.
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* use log level debug for loggers
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
* Fix linter
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Remove noisy debug-level logs; add 'reason' label to drop metrics
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Remove accidentally committed files
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Propagate logger to buildWriteRequest to log dropped data
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Fix docs comment
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Make drop reason more specific
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Remove unnecessary pass of logger
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Use snake_case for reason label
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
* Fix dropped samples metric
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
---------
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
* fix(discovery): allow requireUpdate util to timeout in discovery/file/file_test.go.
The loop ran indefinitely if the condition isn't met.
Before, each iteration created a new timer channel which was always outpaced by
the other timer channel with smaller duration.
minor detail: There was a memory leak: resources of the ~10 previous timers were
constantly kept. With the fix, we may keep the resources of one timer around for defaultWait
but this isn't worth the changes to make it right.
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Merge pull request #13371 from kevinmingtarja/fix-isHeatmapData
ui: fix handling of scalar and string in isHeatmapData
* tsdb/{index,compact}: allow using custom postings encoding format (#13242)
* tsdb/{index,compact}: allow using custom postings encoding format
We would like to experiment with a different postings encoding format in
Thanos so in this change I am proposing adding another argument to
`NewWriter` which would allow users to change the format if needed.
Also, wire the leveled compactor so that it would be possible to change
the format there too.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb/compact: use a struct for leveled compactor options
As discussed on Slack, let's use a struct for the options in leveled
compactor.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb: make changes after Bryan's review
- Make changes less intrusive
- Turn the postings encoder type into a function
- Add NewWriterWithEncoder()
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
---------
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Cut 2.49.0-rc.2
Signed-off-by: bwplotka <bwplotka@gmail.com>
* build(deps): bump actions/setup-go from 3.5.0 to 5.0.0 in /scripts (#13362)
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 3.5.0 to 5.0.0.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](https://github.com/actions/setup-go/compare/6edd4406fa81c3da01a34fa6f6343087c207a568...0c52d547c9bc32b1aa3301fd7a9cb496313a4491)
---
updated-dependencies:
- dependency-name: actions/setup-go
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump github/codeql-action from 2.22.8 to 3.22.12 (#13358)
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 2.22.8 to 3.22.12.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/github/codeql-action/compare/407ffafae6a767df3e0230c3df91b6443ae8df75...012739e5082ff0c22ca6d6ab32e07c36df03c4a4)
---
updated-dependencies:
- dependency-name: github/codeql-action
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* put @nexucis has a release shepherd (#13383)
Signed-off-by: Augustin Husson <augustin.husson@amadeus.com>
* Add analyze histograms command to promtool (#12331)
Add `query analyze` command to promtool
This command analyzes the buckets of classic and native histograms,
based on data queried from the Prometheus query API, i.e. it
doesn't require direct access to the TSDB files.
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* included instance in all necessary descriptions
Signed-off-by: Erik Sommer <ersotech@posteo.de>
* tsdb/compact: fix passing merge func
Fixing a very small logical problem I've introduced :(.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* tsdb: add enable overlapping compaction
This functionality is needed in downstream projects because they have a
separate component that does compaction.
Upstreaming
https://github.com/grafana/mimir-prometheus/blob/7c8e9a2a76fc729e9078889782928b2fdfe240e9/tsdb/compact.go#L323-L325.
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Cut 2.49.0
Signed-off-by: bwplotka <bwplotka@gmail.com>
* promtool: allow setting multiple matchers to "promtool tsdb dump" command. (#13296)
Conditions are ANDed inside the same matcher but matchers are ORed
Including unit tests for "promtool tsdb dump".
Refactor some matchers scraping utils.
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Fixed changelog
Signed-off-by: bwplotka <bwplotka@gmail.com>
* tsdb/main: wire "EnableOverlappingCompaction" to tsdb.Options (#13398)
This added the https://github.com/prometheus/prometheus/pull/13393
"EnableOverlappingCompaction" parameter to the compactor code but not to
the tsdb.Options. I forgot about that. Add it to `tsdb.Options` too and
set it to `true` in Prometheus.
Copy/paste the description from
https://github.com/prometheus/prometheus/pull/13393#issuecomment-1891787986
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
* Issue #13268: fix quality value in accept header
Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
* Cut 2.49.1 with scrape q= bugfix.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Cut 2.49.1 web package.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Restore more efficient version of NewPossibleNonCounterInfo annotation (#13022)
Restore more efficient version of NewPossibleNonCounterInfo annotation
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* Fix regressions introduced by #13242
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* fix slice copy in 1.20 (#13389)
The slices package is added to the standard library in Go 1.21;
we need to import from the exp area to maintain compatibility with Go 1.20.
Signed-off-by: tyltr <tylitianrui@126.com>
* Docs: Query Basics: link to rate (#10538)
Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu>
* chore(kubernetes): check preconditions earlier and avoid unnecessary checks or iterations
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
* Examples: link to `rate` for new users (#10535)
* Examples: link to `rate` for new users
Signed-off-by: Ted Robertson 10043369+tredondo@users.noreply.github.com
Co-authored-by: Bryan Boreham <bjboreham@gmail.com>
* promql: use natural sort in sort_by_label and sort_by_label_desc (#13411)
These functions are intended for humans, as robots can already sort the results
however they please. Humans like things sorted "naturally":
* https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/
A similar thing has been done to Grafana, which is also used by humans:
* https://github.com/grafana/grafana/pull/78024
* https://github.com/grafana/grafana/pull/78494
Signed-off-by: Ivan Babrou <github@ivan.computer>
* TestLabelValuesWithMatchers: Add test case
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* remove obsolete build tag
Signed-off-by: tyltr <tylitianrui@126.com>
* Upgrade some golang dependencies for resty 2.11
Signed-off-by: Israel Blancas <iblancasa@gmail.com>
* Native Histograms: support `native_histogram_min_bucket_factor` in scrape_config (#13222)
Native Histograms: support native_histogram_min_bucket_factor in scrape_config
---------
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
* Add warnings for histogramRate applied with isCounter not matching counter/gauge histogram (#13392)
Add warnings for histogramRate applied with isCounter not matching counter/gauge histogram
---------
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
* Minor fixes to otlp vendor update script
Signed-off-by: Goutham <gouthamve@gmail.com>
* build(deps): bump github.com/hetznercloud/hcloud-go/v2
Bumps [github.com/hetznercloud/hcloud-go/v2](https://github.com/hetznercloud/hcloud-go) from 2.4.0 to 2.6.0.
- [Release notes](https://github.com/hetznercloud/hcloud-go/releases)
- [Changelog](https://github.com/hetznercloud/hcloud-go/blob/main/CHANGELOG.md)
- [Commits](https://github.com/hetznercloud/hcloud-go/compare/v2.4.0...v2.6.0)
---
updated-dependencies:
- dependency-name: github.com/hetznercloud/hcloud-go/v2
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] <support@github.com>
* Enhanced visibility for `promtool test rules` with JSON colored formatting (#13342)
* Added diff flag for unit test to improvise readability & debugging
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Removed blank spaces
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Fixed linting error
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Added cli flags to documentation
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Revert unrrelated linting fixes
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Fixed review suggestions
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Cleanup
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Updated flag description
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* Updated flag description
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
---------
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
* storage: skip merging when no remote storage configured
Prometheus is hard-coded to use a fanout storage between TSDB and
a remote storage which by default is empty.
This change detects the empty storage and skips merging between
result sets, which would make `Select()` sort results.
Bottom line: we skip a sort unless there really is some remote storage
configured.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* Remove csmarchbanks from remote write owners (#13432)
I have not had the time to keep up with remote write and have no plans
to work on it in the near future so I am withdrawing my maintainership
of that part of the codebase. I continue to focus on client_python.
Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
* add more context cancellation check at evaluation time
Signed-off-by: Ben Ye <benye@amazon.com>
* Optimize label values with matchers by taking shortcuts (#13426)
Don't calculate postings beforehand: we may not need them. If all
matchers are for the requested label, we can just filter its values.
Also, if there are no values at all, no need to run any kind of
logic.
Also add more labelValuesWithMatchers benchmarks
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
* Add automatic memory limit handling
Enable automatic detection of memory limits and configure GOMEMLIMIT to
match.
* Also includes a flag to allow controlling the reserved ratio.
Signed-off-by: SuperQ <superq@gmail.com>
* Update OSSF badge link (#13433)
Provide a more user friendly interface
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
* SD Managers taking over responsibility for registration of debug metrics (#13375)
SD Managers take over responsibility for SD metrics registration
---------
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
* Optimize histogram iterators (#13340)
Optimize histogram iterators
Histogram iterators allocate new objects in the AtHistogram and
AtFloatHistogram methods, which makes calculating rates over long
ranges expensive.
In #13215 we allowed an existing object to be reused
when converting an integer histogram to a float histogram. This commit follows
the same idea and allows injecting an existing object in the AtHistogram and
AtFloatHistogram methods. When the injected value is nil, iterators allocate
new histograms, otherwise they populate and return the injected object.
The commit also adds a CopyTo method to Histogram and FloatHistogram which
is used in the BufferedIterator to overwrite items in the ring instead of making
new copies.
Note that a specialized HPoint pool is needed for all of this to work
(`matrixSelectorHPool`).
---------
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
* doc: Mark `mad_over_time` as experimental (#13440)
We forgot to do that in
https://github.com/prometheus/prometheus/pull/13059
Signed-off-by: beorn7 <beorn@grafana.com>
* Change metric label for Puppetdb from 'http' to 'puppetdb'
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
* mirror metrics.proto change & generate code
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
* TestHeadLabelValuesWithMatchers: Add test case (#13414)
Add test case to TestHeadLabelValuesWithMatchers, while fixing a couple
of typos in other test cases. Also enclosing some implicit sub-tests in a
`t.Run` call to make them explicitly sub-tests.
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* update all go dependencies (#13438)
Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
* build(deps): bump the k8s-io group with 2 updates (#13454)
Bumps the k8s-io group with 2 updates: [k8s.io/api](https://github.com/kubernetes/api) and [k8s.io/client-go](https://github.com/kubernetes/client-go).
Updates `k8s.io/api` from 0.28.4 to 0.29.1
- [Commits](https://github.com/kubernetes/api/compare/v0.28.4...v0.29.1)
Updates `k8s.io/client-go` from 0.28.4 to 0.29.1
- [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md)
- [Commits](https://github.com/kubernetes/client-go/compare/v0.28.4...v0.29.1)
---
updated-dependencies:
- dependency-name: k8s.io/api
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: k8s-io
- dependency-name: k8s.io/client-go
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: k8s-io
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump the go-opentelemetry-io group with 1 update (#13453)
Bumps the go-opentelemetry-io group with 1 update: [go.opentelemetry.io/collector/semconv](https://github.com/open-telemetry/opentelemetry-collector).
Updates `go.opentelemetry.io/collector/semconv` from 0.92.0 to 0.93.0
- [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases)
- [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md)
- [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.92.0...v0.93.0)
---
updated-dependencies:
- dependency-name: go.opentelemetry.io/collector/semconv
dependency-type: direct:production
update-type: version-update:semver-minor
dependency-group: go-opentelemetry-io
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump actions/upload-artifact from 3.1.3 to 4.0.0 (#13355)
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 3.1.3 to 4.0.0.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/a8a3f3ad30e3422c9c7b888a15615d19a852ae32...c7d193f32edcb7bfad88892161225aeda64e9392)
---
updated-dependencies:
- dependency-name: actions/upload-artifact
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* build(deps): bump bufbuild/buf-push-action (#13357)
Bumps [bufbuild/buf-push-action](https://github.com/bufbuild/buf-push-action) from 342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1 to a654ff18effe4641ebea4a4ce242c49800728459.
- [Release notes](https://github.com/bufbuild/buf-push-action/releases)
- [Commits](https://github.com/bufbuild/buf-push-action/compare/342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1...a654ff18effe4641ebea4a4ce242c49800728459)
---
updated-dependencies:
- dependency-name: bufbuild/buf-push-action
dependency-type: direct:production
...
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* Labels: Add DropMetricName function, used in PromQL (#13446)
This function is called very frequently when executing PromQL functions,
and we can do it much more efficiently inside Labels.
In the common case that `__name__` comes first in the labels, we simply
re-point to start at the next label, which is nearly free.
`DropMetricName` is now so cheap that I removed the cache; benchmarks show
everything still goes faster.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
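Usage sketch for the new helper (the surrounding function is hypothetical): PromQL code can drop the metric name with a single cheap call instead of rebuilding the label set.
import "github.com/prometheus/prometheus/model/labels"
// withoutName strips __name__ before the result is aggregated further.
func withoutName(lset labels.Labels) labels.Labels {
	// When __name__ sorts first (the common case), this mostly re-points at
	// the remaining labels rather than copying them.
	return lset.DropMetricName()
}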
* tsdb: simplify internal series delete function (#13261)
Lifting an optimisation from Agent code, `seriesHashmap.del` can use
the unique series reference, so it doesn't need to check Labels.
Also streamline the logic for deleting from `unique` and `conflicts` maps,
and add some comments to help the next person.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* otlptranslator/update-copy.sh: Fix sed command lines
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* Rollback k8s.io requirements (#13462)
Rollback k8s.io Go modules to v0.28.6 to avoid forcing upgrade of Go to
1.21. This allows us to keep compatibility with the currently supported
upstream Go releases.
Signed-off-by: SuperQ <superq@gmail.com>
* Make update-copy.sh work for both OSX and GNU sed
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
* Name @beorn7 and @krajorama as maintainers for native histograms
I have been the de-facto maintainer for native histograms from the
beginning. So let's put this into MAINTAINERS.md.
In addition, I hereby propose George Krajcsovits AKA Krajo as a
co-maintainer. He has contributed a lot of native histogram code, but
more importantly, he has contributed substantially to reviewing other
contributors' native histogram code, up to a point where I was merely
rubberstamping the PRs he had already reviewed. I'm confident that he
is ready to be granted commit rights as outlined in the
"Maintainers" section of the governance:
https://prometheus.io/governance/#maintainers
According to the same section of the governance, I will announce the
proposed change on the developers mailing list and will give some time
for lazy consensus before merging this PR.
Signed-off-by: beorn7 <beorn@grafana.com>
* ui/fix: correct url handling for stacked graphs (#13460)
Signed-off-by: Yury Moladau <yurymolodov@gmail.com>
* tsdb: use cheaper Mutex on series
Mutex is 8 bytes; RWMutex is 24 bytes and much more complicated. Since
`RLock` is only used in two places, `UpdateMetadata` and `Delete`,
neither of which is a hotspot, we should use the cheaper one.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
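The quoted sizes can be checked directly; this throwaway snippet is illustrative only.
import (
	"fmt"
	"sync"
	"unsafe"
)
func printLockSizes() {
	fmt.Println(unsafe.Sizeof(sync.Mutex{}))   // 8 bytes
	fmt.Println(unsafe.Sizeof(sync.RWMutex{})) // 24 bytes
}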
* Fix last_over_time for native histograms
The last_over_time function retains a histogram sample without making a copy.
This sample is now coming from the buffered iterator used for windowing functions,
and can be reused for reading subsequent samples as the iterator progresses.
I would propose copying the sample in the last_over_time function, similar to
how it is done for rate, sum_over_time and others.
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
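A minimal sketch of the proposed copy, assuming the retained value is a *histogram.FloatHistogram coming out of the buffered iterator; retainLast is a hypothetical helper, not the engine's actual code.
import "github.com/prometheus/prometheus/model/histogram"
// retainLast detaches the sample from the iterator's reusable buffer before keeping it.
func retainLast(fh *histogram.FloatHistogram) *histogram.FloatHistogram {
	if fh == nil {
		return nil
	}
	return fh.Copy()
}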
* Implementation
NOTE:
Rebased from main after refactor in #13014
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Add feature flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactor concurrency control
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Optimising dependencies/dependents funcs to not produce new slices on each request
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactoring
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Rename flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Refactoring for performance, and to allow controller to be overridden
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Block until all rules, both sync & async, have completed evaluating
Updated & added tests
Review feedback nits
Return empty map if not indeterminate
Use highWatermark to track inflight requests counter
Appease the linter
Clarify feature flag
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
* Fix typo in CLI flag description
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Fixed auto-generated doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Improve doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Simplify the design to update the concurrency controller once the rule evaluation is done
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Add more test cases to TestDependenciesEdgeCases
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Added more test cases to TestDependenciesEdgeCases
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Improved RuleConcurrencyController interface doc
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Introduced sequentialRuleEvalController
Signed-off-by: Marco Pracucci <marco@pracucci.com>
* Remove superfluous nil check in Group.metrics
Signed-off-by: Marco Pracucci <marco@pracucci.com>
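The concurrency handling sketched in the commits above follows a familiar pattern; the snippet below is an illustrative stdlib version under simplified assumptions (no dependency analysis, just a slice of evaluation functions), not the actual rules package code.
import "sync"
// evalGroup runs rules concurrently up to maxConcurrent slots, falls back to
// in-order evaluation when no slot is free, and blocks until every rule,
// sync or async, has finished.
func evalGroup(rules []func(), maxConcurrent int) {
	slots := make(chan struct{}, maxConcurrent)
	var wg sync.WaitGroup
	for _, eval := range rules {
		select {
		case slots <- struct{}{}:
			wg.Add(1)
			go func(eval func()) {
				defer wg.Done()
				defer func() { <-slots }()
				eval()
			}(eval)
		default:
			eval() // No free slot: evaluate sequentially on this goroutine.
		}
	}
	wg.Wait()
}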
* api: Serialize discovered and target labels into JSON directly (#13469)
Converted maps into labels.Labels to avoid a lot of copying of data, which led to very high memory consumption when opening the /service-discovery endpoint in the Prometheus UI.
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
* api: Serialize discovered labels into JSON directly in dropped targets (#13484)
Converted maps into labels.Labels to avoid a lot of copying of data, which led to very high memory consumption when opening the /service-discovery endpoint in the Prometheus UI.
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
* Add ShardedPostings() support to TSDB (#10421)
This PR is a reference implementation of the proposal described in #10420.
In addition to what is described in #10420, in this PR I've introduced labels.StableHash(). The idea is to offer a hashing function which doesn't change over time, and that's used by query sharding in order to get a stable behaviour over time. The implementation of labels.StableHash() is the hashing function used by Prometheus before stringlabels, and it is what Grafana Mimir uses for query sharding (because Mimir was built before stringlabels was a thing).
Follow-up work
As mentioned in #10420, if this PR is accepted I'm also open to uploading another fundamental piece used by Grafana Mimir query sharding to accelerate query execution: an optional, configurable and fast in-memory cache for the series hashes.
Signed-off-by: Marco Pracucci <marco@pracucci.com>
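A sketch of how the stable hash is used for query sharding (inShard is a hypothetical helper): a series belongs to a shard exactly when its hash modulo the shard count equals the shard index, and because the hash never changes across versions the assignment stays stable over time.
import "github.com/prometheus/prometheus/model/labels"
func inShard(lset labels.Labels, shardIndex, shardCount uint64) bool {
	return labels.StableHash(lset)%shardCount == shardIndex
}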
* storage/remote: document why two benchmarks are skipped
One was silently doing nothing; one was doing something but the work
didn't go up linearly with iteration count.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
* Pod status changes not discovered by Kube Endpoints SD (#13337)
* fix(discovery/kubernetes/endpoints): react to changes on Pods, because some modifications can occur on them without triggering an update on the related Endpoints (e.g. the Pod phase changing from Pending to Running).
---------
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com>
* Small improvements, add const, remove copypasta (#8106)
Signed-off-by: Mikhail Fesenko <proggga@gmail.com>
Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com>
* Proposal to improve FPointSlice and HPointSlice allocation. (#13448)
* Reusing the points slice from the previous series when the slice is underutilized
* Adding comments on the benchmark test
Signed-off-by: Alan Protasio <alanprot@gmail.com>
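The reuse trick generalizes to any point slice; the generic helper below is an illustration under assumed names, not the engine's actual code.
// reusePoints keeps the previous backing array when it is still big enough,
// otherwise it allocates a fresh slice with the requested capacity.
func reusePoints[T any](prev []T, needed int) []T {
	if cap(prev) >= needed {
		return prev[:0]
	}
	return make([]T, 0, needed)
}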
* lint
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* go mod tidy
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
---------
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
Signed-off-by: Erik Sommer <ersotech@posteo.de>
Signed-off-by: Linas Medziunas <linas.medziunas@gmail.com>
Signed-off-by: bwplotka <bwplotka@gmail.com>
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
Signed-off-by: Daniel Nicholls <daniel.nicholls@resdiary.com>
Signed-off-by: Daniel Kerbel <nmdanny@gmail.com>
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Signed-off-by: Augustin Husson <augustin.husson@amadeus.com>
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Signed-off-by: Marco Pracucci <marco@pracucci.com>
Signed-off-by: tyltr <tylitianrui@126.com>
Signed-off-by: Ted Robertson <10043369+tredondo@users.noreply.github.com>
Signed-off-by: Ivan Babrou <github@ivan.computer>
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
Signed-off-by: Israel Blancas <iblancasa@gmail.com>
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Signed-off-by: Goutham <gouthamve@gmail.com>
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
Signed-off-by: Ben Ye <benye@amazon.com>
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
Signed-off-by: SuperQ <superq@gmail.com>
Signed-off-by: Ben Kochie <superq@gmail.com>
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
Signed-off-by: beorn7 <beorn@grafana.com>
Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
Signed-off-by: Yury Moladau <yurymolodov@gmail.com>
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Signed-off-by: Mikhail Fesenko <proggga@gmail.com>
Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com>
Signed-off-by: Alan Protasio <alanprot@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
Co-authored-by: Julian Wiedmann <jwi@linux.ibm.com>
Co-authored-by: Bryan Boreham <bjboreham@gmail.com>
Co-authored-by: Erik Sommer <ersotech@posteo.de>
Co-authored-by: Linas Medziunas <linas.medziunas@gmail.com>
Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
Co-authored-by: Arianna Vespri <arianna.vespri@yahoo.it>
Co-authored-by: machine424 <ayoubmrini424@gmail.com>
Co-authored-by: daniel-resdiary <109083091+daniel-resdiary@users.noreply.github.com>
Co-authored-by: Daniel Kerbel <nmdanny@gmail.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Jan Fajerski <jfajersk@redhat.com>
Co-authored-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Co-authored-by: Marc Tudurí <marctc@protonmail.com>
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Co-authored-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Co-authored-by: Augustin Husson <husson.augustin@gmail.com>
Co-authored-by: Björn Rabenstein <beorn@grafana.com>
Co-authored-by: zenador <zenador@users.noreply.github.com>
Co-authored-by: gotjosh <josue.abreu@gmail.com>
Co-authored-by: Ben Kochie <superq@gmail.com>
Co-authored-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Co-authored-by: Marco Pracucci <marco@pracucci.com>
Co-authored-by: tyltr <tylitianrui@126.com>
Co-authored-by: Ted Robertson <10043369+tredondo@users.noreply.github.com>
Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu>
Co-authored-by: Matthias Loibl <mail@matthiasloibl.com>
Co-authored-by: Ivan Babrou <github@ivan.computer>
Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com>
Co-authored-by: Israel Blancas <iblancasa@gmail.com>
Co-authored-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: Goutham <gouthamve@gmail.com>
Co-authored-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Co-authored-by: Chris Marchbanks <csmarchbanks@gmail.com>
Co-authored-by: Ben Ye <benye@amazon.com>
Co-authored-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Co-authored-by: Paulin Todev <paulin.todev@gmail.com>
Co-authored-by: Filip Petkovski <filip.petkovsky@gmail.com>
Co-authored-by: Yury Molodov <yurymolodov@gmail.com>
Co-authored-by: Danny Kopping <danny.kopping@grafana.com>
Co-authored-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com>
Co-authored-by: Mikhail Fesenko <proggga@gmail.com>
Co-authored-by: Alan Protasio <alanprot@gmail.com>
2024-02-02 10:38:50 -08:00
s . Unlock ( )
2022-07-19 01:58:52 -07:00
if hasNewMetadata {
a . metadata = append ( a . metadata , record . RefMetadata {
Ref : s . ref ,
Type : record . GetMetricType ( meta . Type ) ,
Unit : meta . Unit ,
Help : meta . Help ,
} )
a . metadataSeries = append ( a . metadataSeries , s )
}
return ref , nil
}
2021-08-03 05:14:26 -07:00
var _ storage . GetRef = & headAppender { }
2022-10-24 01:17:45 -07:00
func ( a * headAppender ) GetRef ( lset labels . Labels , hash uint64 ) ( storage . SeriesRef , labels . Labels ) {
s := a . head . series . getByHash ( hash , lset )
2021-08-03 05:14:26 -07:00
if s == nil {
2022-03-09 14:17:40 -08:00
return 0 , labels . EmptyLabels ( )
2021-08-03 05:14:26 -07:00
}
// returned labels must be suitable to pass to Append()
2021-11-06 03:10:04 -07:00
return storage . SeriesRef ( s . ref ) , s . lset
2021-08-03 05:14:26 -07:00
}
2021-11-17 02:21:27 -08:00
// log writes all headAppender's data to the WAL.
2021-08-03 05:14:26 -07:00
func ( a * headAppender ) log ( ) error {
if a . head . wal == nil {
return nil
}
buf := a . head . getBytesBuffer ( )
defer func ( ) { a . head . putBytesBuffer ( buf ) } ( )
var rec [ ] byte
var enc record . Encoder
if len ( a . series ) > 0 {
rec = enc . Series ( a . series , buf )
buf = rec [ : 0 ]
if err := a . head . wal . Log ( rec ) ; err != nil {
2023-11-16 10:54:41 -08:00
return fmt . Errorf ( "log series: %w" , err )
2021-08-03 05:14:26 -07:00
}
}
2022-07-19 01:58:52 -07:00
if len ( a . metadata ) > 0 {
rec = enc . Metadata ( a . metadata , buf )
buf = rec [ : 0 ]
if err := a . head . wal . Log ( rec ) ; err != nil {
2023-11-16 10:54:41 -08:00
return fmt . Errorf ( "log metadata: %w" , err )
2022-07-19 01:58:52 -07:00
}
}
2021-08-03 05:14:26 -07:00
if len ( a . samples ) > 0 {
rec = enc . Samples ( a . samples , buf )
buf = rec [ : 0 ]
if err := a . head . wal . Log ( rec ) ; err != nil {
2023-11-16 10:54:41 -08:00
return fmt . Errorf ( "log samples: %w" , err )
2021-08-03 05:14:26 -07:00
}
}
2021-08-11 05:08:48 -07:00
if len ( a . histograms ) > 0 {
2022-08-29 05:08:36 -07:00
rec = enc . HistogramSamples ( a . histograms , buf )
2021-08-11 05:08:48 -07:00
buf = rec [ : 0 ]
if err := a . head . wal . Log ( rec ) ; err != nil {
2023-11-16 10:54:41 -08:00
return fmt . Errorf ( "log histograms: %w" , err )
2021-08-11 05:08:48 -07:00
}
}
2022-12-28 00:55:07 -08:00
if len ( a . floatHistograms ) > 0 {
rec = enc . FloatHistogramSamples ( a . floatHistograms , buf )
buf = rec [ : 0 ]
if err := a . head . wal . Log ( rec ) ; err != nil {
2023-11-16 10:54:41 -08:00
return fmt . Errorf ( "log float histograms: %w" , err )
2022-12-28 00:55:07 -08:00
}
}
2023-11-11 08:30:16 -08:00
// Exemplars should be logged after samples (float/native histogram/etc),
// otherwise it might happen that we send the exemplars in a remote write
// batch before the samples, which in turn means the exemplar is rejected
// for missing series, since series are created due to samples.
if len ( a . exemplars ) > 0 {
rec = enc . Exemplars ( exemplarsForEncoding ( a . exemplars ) , buf )
buf = rec [ : 0 ]
if err := a . head . wal . Log ( rec ) ; err != nil {
2023-11-16 10:54:41 -08:00
return fmt . Errorf ( "log exemplars: %w" , err )
2023-11-11 08:30:16 -08:00
}
}
2021-08-03 05:14:26 -07:00
return nil
}
func exemplarsForEncoding ( es [ ] exemplarWithSeriesRef ) [ ] record . RefExemplar {
ret := make ( [ ] record . RefExemplar , 0 , len ( es ) )
for _ , e := range es {
ret = append ( ret , record . RefExemplar {
2021-11-06 03:10:04 -07:00
Ref : chunks . HeadSeriesRef ( e . ref ) ,
2021-08-03 05:14:26 -07:00
T : e . exemplar . Ts ,
V : e . exemplar . Value ,
Labels : e . exemplar . Labels ,
} )
}
return ret
}
2021-11-17 02:21:27 -08:00
// Commit writes to the WAL and adds the data to the Head.
2022-09-20 10:05:50 -07:00
// TODO(codesome): Refactor this method to reduce indentation and make it more readable.
2021-08-03 05:14:26 -07:00
func ( a * headAppender ) Commit ( ) ( err error ) {
if a . closed {
return ErrAppenderClosed
}
defer func ( ) { a . closed = true } ( )
if err := a . log ( ) ; err != nil {
_ = a . Rollback ( ) // Most likely the same error will happen again.
2023-11-16 10:54:41 -08:00
return fmt . Errorf ( "write to WAL: %w" , err )
2021-08-03 05:14:26 -07:00
}
2023-05-15 12:31:49 -07:00
if a . head . writeNotified != nil {
a . head . writeNotified . Notify ( )
}
2021-08-03 05:14:26 -07:00
// No errors logging to WAL, so pass the exemplars along to the in-memory storage.
for _ , e := range a . exemplars {
2021-11-06 03:10:04 -07:00
s := a . head . series . getByID ( chunks . HeadSeriesRef ( e . ref ) )
2023-11-12 06:51:37 -08:00
if s == nil {
// This is very unlikely to happen, but we have seen it in the wild.
// It means that the series was truncated between AppendExemplar and Commit.
// See TestHeadCompactionWhileAppendAndCommitExemplar.
continue
}
2021-08-03 05:14:26 -07:00
// We don't instrument exemplar appends here, all is instrumented by storage.
if err := a . head . exemplars . AddExemplar ( s . lset , e . exemplar ) ; err != nil {
2023-11-16 10:54:41 -08:00
if errors . Is ( err , storage . ErrOutOfOrderExemplar ) {
2021-08-03 05:14:26 -07:00
continue
}
level . Debug ( a . head . logger ) . Log ( "msg" , "Unknown error while adding exemplar" , "err" , err )
}
}
defer a . head . metrics . activeAppenders . Dec ( )
defer a . head . putAppendBuffer ( a . samples )
defer a . head . putSeriesBuffer ( a . sampleSeries )
defer a . head . putExemplarBuffer ( a . exemplars )
2022-07-29 01:48:53 -07:00
defer a . head . putHistogramBuffer ( a . histograms )
2022-12-28 00:55:07 -08:00
defer a . head . putFloatHistogramBuffer ( a . floatHistograms )
2022-07-19 01:58:52 -07:00
defer a . head . putMetadataBuffer ( a . metadata )
2021-08-03 05:14:26 -07:00
defer a . head . iso . closeAppend ( a . appendID )
2022-09-20 10:05:50 -07:00
var (
samplesAppended = len ( a . samples )
oooAccepted int // Number of samples that were out of order but accepted: OOO enabled and within the time window.
oooRejected int // Number of samples rejected because they were out of order while OOO support is disabled.
tooOldRejected int // Number of out-of-order samples rejected because they were too old: OOO enabled, but outside the time window.
oobRejected int // Number of samples rejected as out of bounds: t < minValidTime (OOO support disabled).
inOrderMint int64 = math . MaxInt64
inOrderMaxt int64 = math . MinInt64
ooomint int64 = math . MaxInt64
ooomaxt int64 = math . MinInt64
wblSamples [ ] record . RefSample
oooMmapMarkers map [ chunks . HeadSeriesRef ] chunks . ChunkDiskMapperRef
oooRecords [ ] [ ] byte
2022-09-27 01:22:22 -07:00
oooCapMax = a . head . opts . OutOfOrderCapMax . Load ( )
2022-09-20 10:05:50 -07:00
series * memSeries
2023-05-25 13:12:32 -07:00
appendChunkOpts = chunkOpts {
chunkDiskMapper : a . head . chunkDiskMapper ,
chunkRange : a . head . chunkRange . Load ( ) ,
samplesPerChunk : a . head . opts . SamplesPerChunk ,
}
enc record . Encoder
2022-09-20 10:05:50 -07:00
)
defer func ( ) {
for i := range oooRecords {
a . head . putBytesBuffer ( oooRecords [ i ] [ : 0 ] )
}
} ( )
collectOOORecords := func ( ) {
if a . head . wbl == nil {
// WBL is not enabled. So no need to collect.
wblSamples = nil
oooMmapMarkers = nil
return
}
// The m-map happens before adding a new sample. So we collect
// the m-map markers first, and then samples.
// WBL Graphically:
// WBL Before this Commit(): [old samples before this commit for chunk 1]
// WBL After this Commit(): [old samples before this commit for chunk 1][new samples in this commit for chunk 1]mmapmarker1[samples for chunk 2]mmapmarker2[samples for chunk 3]
if oooMmapMarkers != nil {
markers := make ( [ ] record . RefMmapMarker , 0 , len ( oooMmapMarkers ) )
for ref , mmapRef := range oooMmapMarkers {
markers = append ( markers , record . RefMmapMarker {
Ref : ref ,
MmapRef : mmapRef ,
} )
}
r := enc . MmapMarkers ( markers , a . head . getBytesBuffer ( ) )
oooRecords = append ( oooRecords , r )
}
if len ( wblSamples ) > 0 {
r := enc . Samples ( wblSamples , a . head . getBytesBuffer ( ) )
oooRecords = append ( oooRecords , r )
}
wblSamples = nil
oooMmapMarkers = nil
}
2021-08-03 05:14:26 -07:00
for i , s := range a . samples {
series = a . sampleSeries [ i ]
series . Lock ( )
2022-09-20 10:05:50 -07:00
oooSample , _ , err := series . appendable ( s . T , s . V , a . headMaxt , a . minValidTime , a . oooTimeWindow )
2023-11-16 10:54:41 -08:00
switch {
case err == nil :
2023-08-01 05:04:45 -07:00
// Do nothing.
2023-11-16 10:54:41 -08:00
case errors . Is ( err , storage . ErrOutOfOrderSample ) :
2022-09-20 10:05:50 -07:00
samplesAppended --
oooRejected ++
2023-11-16 10:54:41 -08:00
case errors . Is ( err , storage . ErrOutOfBounds ) :
2022-09-20 10:05:50 -07:00
samplesAppended --
oobRejected ++
2023-11-16 10:54:41 -08:00
case errors . Is ( err , storage . ErrTooOldSample ) :
2022-09-20 10:05:50 -07:00
samplesAppended --
tooOldRejected ++
default :
samplesAppended --
2021-08-03 05:14:26 -07:00
}
2022-09-20 10:05:50 -07:00
var ok , chunkCreated bool
style: Replace `else if` cascades with `switch`
Wiser coders than myself have come to the conclusion that a `switch`
statement is almost always superior to a statement that includes any
`else if`.
The exceptions that I have found in our codebase are just these two:
* The `if else` is followed by an additional statement before the next
condition (separated by a `;`).
* The whole thing is within a `for` loop and `break` statements are
used. In this case, using `switch` would require tagging the `for`
loop, which probably tips the balance.
Why are `switch` statements more readable?
For one, fewer curly braces. But more importantly, the conditions all
have the same alignment, so the whole thing follows the natural flow
of going down a list of conditions. With `else if`, in contrast, all
conditions but the first are "hidden" behind `} else if `, harder to
spot and (for no good reason) presented differently from the first
condition.
I'm sure the aforementioned wise coders can list even more reasons.
In any case, I like it so much that I have found myself recommending
it in code reviews. I would like to make it a habit in our code base,
without making it a hard requirement that we would test on the CI. But
for that, there has to be a role model, so this commit eliminates all
`if else` occurrences, unless it is autogenerated code or fits one of
the exceptions above.
Signed-off-by: beorn7 <beorn@grafana.com>
2023-04-12 07:14:31 -07:00
switch {
case err != nil :
// Do nothing here.
case oooSample :
2022-09-20 10:05:50 -07:00
// Sample is OOO and OOO handling is enabled
// and the delta is within the OOO tolerance.
var mmapRef chunks . ChunkDiskMapperRef
2022-09-27 01:22:22 -07:00
ok , chunkCreated , mmapRef = series . insert ( s . T , s . V , a . head . chunkDiskMapper , oooCapMax )
2022-09-20 10:05:50 -07:00
if chunkCreated {
r , ok := oooMmapMarkers [ series . ref ]
if ! ok || r != 0 {
// !ok means there are no markers collected for these samples yet. So we first flush the samples
// before setting this m-map marker.
// r != 0 means we have already m-mapped a chunk for this series in the same Commit().
// Hence, before we m-map again, we should add the samples and m-map markers
// seen till now to the WBL records.
collectOOORecords ( )
}
if oooMmapMarkers == nil {
oooMmapMarkers = make ( map [ chunks . HeadSeriesRef ] chunks . ChunkDiskMapperRef )
}
oooMmapMarkers [ series . ref ] = mmapRef
}
if ok {
wblSamples = append ( wblSamples , s )
if s . T < ooomint {
ooomint = s . T
}
if s . T > ooomaxt {
ooomaxt = s . T
}
oooAccepted ++
} else {
// Sample is an exact duplicate of the last sample.
// NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk,
// not with samples in already flushed OOO chunks.
// TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305.
samplesAppended --
}
2023-04-12 07:14:31 -07:00
default :
2023-05-25 13:12:32 -07:00
ok , chunkCreated = series . append ( s . T , s . V , a . appendID , appendChunkOpts )
2022-09-20 10:05:50 -07:00
if ok {
if s . T < inOrderMint {
inOrderMint = s . T
}
if s . T > inOrderMaxt {
inOrderMaxt = s . T
}
} else {
// The sample is an exact duplicate, and should be silently dropped.
samplesAppended --
}
2021-08-04 23:46:08 -07:00
}
2022-09-20 10:05:50 -07:00
2021-08-04 23:46:08 -07:00
if chunkCreated {
a . head . metrics . chunks . Inc ( )
a . head . metrics . chunksCreated . Inc ( )
}
2022-09-20 10:05:50 -07:00
series . cleanupAppendIDsBelow ( a . cleanupAppendIDsBelow )
series . pendingCommit = false
series . Unlock ( )
2021-08-04 23:46:08 -07:00
}
2022-10-11 09:35:35 -07:00
histogramsTotal := len ( a . histograms )
histoOOORejected := 0
for i , s := range a . histograms {
series = a . histogramSeries [ i ]
series . Lock ( )
2023-05-25 13:12:32 -07:00
ok , chunkCreated := series . appendHistogram ( s . T , s . H , a . appendID , appendChunkOpts )
2022-10-11 09:35:35 -07:00
series . cleanupAppendIDsBelow ( a . cleanupAppendIDsBelow )
series . pendingCommit = false
series . Unlock ( )
if ok {
if s . T < inOrderMint {
inOrderMint = s . T
}
if s . T > inOrderMaxt {
inOrderMaxt = s . T
}
} else {
histogramsTotal --
histoOOORejected ++
}
if chunkCreated {
a . head . metrics . chunks . Inc ( )
a . head . metrics . chunksCreated . Inc ( )
}
}
2022-12-28 00:55:07 -08:00
histogramsTotal += len ( a . floatHistograms )
for i , s := range a . floatHistograms {
series = a . floatHistogramSeries [ i ]
series . Lock ( )
2023-05-25 13:12:32 -07:00
ok , chunkCreated := series . appendFloatHistogram ( s . T , s . FH , a . appendID , appendChunkOpts )
2022-12-28 00:55:07 -08:00
series . cleanupAppendIDsBelow ( a . cleanupAppendIDsBelow )
series . pendingCommit = false
series . Unlock ( )
if ok {
if s . T < inOrderMint {
inOrderMint = s . T
}
if s . T > inOrderMaxt {
inOrderMaxt = s . T
}
} else {
histogramsTotal --
histoOOORejected ++
}
if chunkCreated {
a . head . metrics . chunks . Inc ( )
a . head . metrics . chunksCreated . Inc ( )
}
}
2022-07-19 01:58:52 -07:00
for i , m := range a . metadata {
series = a . metadataSeries [ i ]
series . Lock ( )
2023-11-22 06:39:21 -08:00
series . meta = & metadata . Metadata { Type : record . ToMetricType ( m . Type ) , Unit : m . Unit , Help : m . Help }
2022-07-19 01:58:52 -07:00
series . Unlock ( )
}
2022-10-11 09:35:35 -07:00
a . head . metrics . outOfOrderSamples . WithLabelValues ( sampleMetricTypeFloat ) . Add ( float64 ( oooRejected ) )
a . head . metrics . outOfOrderSamples . WithLabelValues ( sampleMetricTypeHistogram ) . Add ( float64 ( histoOOORejected ) )
a . head . metrics . outOfBoundSamples . WithLabelValues ( sampleMetricTypeFloat ) . Add ( float64 ( oobRejected ) )
a . head . metrics . tooOldSamples . WithLabelValues ( sampleMetricTypeFloat ) . Add ( float64 ( tooOldRejected ) )
a . head . metrics . samplesAppended . WithLabelValues ( sampleMetricTypeFloat ) . Add ( float64 ( samplesAppended ) )
2022-10-03 00:53:28 -07:00
a . head . metrics . samplesAppended . WithLabelValues ( sampleMetricTypeHistogram ) . Add ( float64 ( histogramsTotal ) )
2022-09-20 10:05:50 -07:00
a . head . metrics . outOfOrderSamplesAppended . Add ( float64 ( oooAccepted ) )
a . head . updateMinMaxTime ( inOrderMint , inOrderMaxt )
a . head . updateMinOOOMaxOOOTime ( ooomint , ooomaxt )
collectOOORecords ( )
if a . head . wbl != nil {
if err := a . head . wbl . Log ( oooRecords ... ) ; err != nil {
// TODO(codesome): Currently WBL logging of ooo samples is best effort here since we cannot try logging
// until we have found what samples become OOO. We can try having a metric for this failure.
// Returning the error here is not correct because we have already put the samples into the memory,
// hence the append/insert was a success.
level . Error ( a . head . logger ) . Log ( "msg" , "Failed to log out of order samples into the WAL" , "err" , err )
}
}
2021-08-03 05:14:26 -07:00
return nil
}
2022-09-20 10:05:50 -07:00
// insert is like append, except it inserts the sample into the out-of-order head chunk. Used for OOO samples.
2022-09-27 01:22:22 -07:00
func ( s * memSeries ) insert ( t int64 , v float64 , chunkDiskMapper * chunks . ChunkDiskMapper , oooCapMax int64 ) ( inserted , chunkCreated bool , mmapRef chunks . ChunkDiskMapperRef ) {
2022-12-28 02:19:41 -08:00
if s . ooo == nil {
s . ooo = & memSeriesOOOFields { }
}
c := s . ooo . oooHeadChunk
2022-09-27 01:22:22 -07:00
if c == nil || c . chunk . NumSamples ( ) == int ( oooCapMax ) {
2022-09-20 10:05:50 -07:00
// Note: If no new samples come in then we rely on compaction to clean up stale in-memory OOO chunks.
c , mmapRef = s . cutNewOOOHeadChunk ( t , chunkDiskMapper )
chunkCreated = true
}
ok := c . chunk . Insert ( t , v )
if ok {
if chunkCreated || t < c . minTime {
c . minTime = t
}
if chunkCreated || t > c . maxTime {
c . maxTime = t
}
}
return ok , chunkCreated , mmapRef
}
2023-05-25 13:12:32 -07:00
// chunkOpts are chunk-level options that are passed when appending to a memSeries.
type chunkOpts struct {
chunkDiskMapper * chunks . ChunkDiskMapper
chunkRange int64
samplesPerChunk int
}
2021-08-03 05:14:26 -07:00
// append adds the sample (t, v) to the series. The caller also has to provide
// the appendID for isolation. (The appendID can be zero, which results in no
// isolation for this append.)
// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
2023-05-25 13:12:32 -07:00
func ( s * memSeries ) append ( t int64 , v float64 , appendID uint64 , o chunkOpts ) ( sampleInOrder , chunkCreated bool ) {
2023-05-25 13:35:09 -07:00
c , sampleInOrder , chunkCreated := s . appendPreprocessor ( t , chunkenc . EncXOR , o )
2021-08-04 23:46:08 -07:00
if ! sampleInOrder {
return sampleInOrder , chunkCreated
}
s . app . Append ( t , v )
c . maxTime = t
2022-10-11 09:35:35 -07:00
s . lastValue = v
2022-12-28 01:01:55 -08:00
s . lastHistogramValue = nil
s . lastFloatHistogramValue = nil
2021-08-04 23:46:08 -07:00
if appendID > 0 {
s . txs . add ( appendID )
}
return true , chunkCreated
}
2021-10-09 06:57:07 -07:00
// appendHistogram adds the histogram.
2021-08-04 23:46:08 -07:00
// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
2023-07-26 06:08:16 -07:00
// In case of recoding the existing chunk, a new chunk is allocated and the old chunk is dropped.
// To keep the meaning of prometheus_tsdb_head_chunks and prometheus_tsdb_head_chunks_created_total
// consistent, we return chunkCreated=false in this case.
2023-05-25 13:12:32 -07:00
func ( s * memSeries ) appendHistogram ( t int64 , h * histogram . Histogram , appendID uint64 , o chunkOpts ) ( sampleInOrder , chunkCreated bool ) {
2022-07-06 05:34:02 -07:00
// Head controls the execution of recoding, so that we own the proper
2023-07-26 06:08:16 -07:00
// chunk reference afterwards and mmap used up chunks.
// Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway.
prevApp , _ := s . app . ( * chunkenc . HistogramAppender )
2023-08-24 06:21:17 -07:00
c , sampleInOrder , chunkCreated := s . histogramsAppendPreprocessor ( t , chunkenc . EncHistogram , o )
2021-08-04 23:46:08 -07:00
if ! sampleInOrder {
return sampleInOrder , chunkCreated
}
2022-07-06 05:34:02 -07:00
2023-07-26 06:08:16 -07:00
var (
newChunk chunkenc . Chunk
recoded bool
)
2021-08-04 23:46:08 -07:00
2023-07-26 06:08:16 -07:00
if ! chunkCreated {
// Ignore the previous appender if we continue the current chunk.
prevApp = nil
2021-10-06 03:32:19 -07:00
}
2023-07-26 06:08:16 -07:00
newChunk , recoded , s . app , _ = s . app . AppendHistogram ( prevApp , t , h , false ) // false=request a new chunk if needed
2021-08-04 23:46:08 -07:00
2022-10-11 09:35:35 -07:00
s . lastHistogramValue = h
2022-12-28 01:01:55 -08:00
s . lastFloatHistogramValue = nil
2021-08-04 23:46:08 -07:00
if appendID > 0 {
s . txs . add ( appendID )
}
2023-07-26 06:08:16 -07:00
if newChunk == nil { // Sample was appended to existing chunk or is the first sample in a new chunk.
c . maxTime = t
return true , chunkCreated
}
if recoded { // The appender needed to recode the chunk.
c . maxTime = t
c . chunk = newChunk
return true , false
}
Use a linked list for memSeries.headChunk (#11818)
Currently memSeries holds a single head chunk in-memory and a slice of mmapped chunks.
When append() is called on memSeries it might decide that a new headChunk is needed to use for given append() call.
If that happens it will first mmap existing head chunk and only after that happens it will create a new empty headChunk and continue appending
our sample to it.
Since appending samples uses write lock on memSeries no other read or write can happen until any append is completed.
When we have an append() that must create a new head chunk the whole memSeries is blocked until mmapping of existing head chunk finishes.
Mmapping itself uses a lock as it needs to be serialised, which means that the more chunks to mmap we have the longer each chunk might wait
for it to be mmapped.
If there are enough chunks that require mmapping, some memSeries will be locked long enough to start affecting
queries and scrapes.
Queries might time out, since by default they have a 2 minute timeout set.
Scrapes will be blocked inside append() call, which means there will be a gap between samples. This will first affect range queries
or calls using rate() and such, since the time range requested in the query might have too few samples to calculate anything.
To avoid this we need to remove mmapping from append path, since mmapping is blocking.
But this means that when we cut a new head chunk we need to keep the old one around, so we can mmap it later.
This change makes memSeries.headChunk a linked list, memSeries.headChunk still points to the 'open' head chunk that receives new samples,
while older, yet to be mmapped, chunks are linked to it.
Mmapping is done on a schedule by iterating all memSeries one by one. Thanks to this we control when mmapping is done, since we trigger
it manually, which reduces the risk that it will have to compete for mmap locks with other chunks.
Signed-off-by: Łukasz Mierzwa <l.mierzwa@gmail.com>
2023-07-31 02:10:24 -07:00
s . headChunks = & memChunk {
2023-07-26 06:08:16 -07:00
chunk : newChunk ,
minTime : t ,
maxTime : t ,
2023-07-31 02:10:24 -07:00
prev : s . headChunks ,
2023-07-26 06:08:16 -07:00
}
s . nextAt = rangeForTimestamp ( t , o . chunkRange )
return true , true
2021-08-04 23:46:08 -07:00
}
2022-12-28 00:55:07 -08:00
// appendFloatHistogram adds the float histogram.
// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
2023-07-26 06:08:16 -07:00
// In case of recoding the existing chunk, a new chunk is allocated and the old chunk is dropped.
// To keep the meaning of prometheus_tsdb_head_chunks and prometheus_tsdb_head_chunks_created_total
// consistent, we return chunkCreated=false in this case.
2023-05-25 13:12:32 -07:00
func ( s * memSeries ) appendFloatHistogram ( t int64 , fh * histogram . FloatHistogram , appendID uint64 , o chunkOpts ) ( sampleInOrder , chunkCreated bool ) {
2022-12-28 00:55:07 -08:00
// Head controls the execution of recoding, so that we own the proper
2023-07-26 06:08:16 -07:00
// chunk reference afterwards and mmap used up chunks.
// Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway.
prevApp , _ := s . app . ( * chunkenc . FloatHistogramAppender )
2023-08-24 06:21:17 -07:00
c , sampleInOrder , chunkCreated := s . histogramsAppendPreprocessor ( t , chunkenc . EncFloatHistogram , o )
2022-12-28 00:55:07 -08:00
if ! sampleInOrder {
return sampleInOrder , chunkCreated
}
2023-07-26 06:08:16 -07:00
var (
newChunk chunkenc . Chunk
recoded bool
)
2022-12-28 00:55:07 -08:00
2023-07-26 06:08:16 -07:00
if ! chunkCreated {
// Ignore the previous appender if we continue the current chunk.
prevApp = nil
2022-12-28 00:55:07 -08:00
}
2023-07-26 06:08:16 -07:00
newChunk , recoded , s . app , _ = s . app . AppendFloatHistogram ( prevApp , t , fh , false ) // False means request a new chunk if needed.
2022-12-28 00:55:07 -08:00
2022-12-28 01:01:55 -08:00
s . lastHistogramValue = nil
2023-07-26 06:08:16 -07:00
s . lastFloatHistogramValue = fh
2022-12-28 00:55:07 -08:00
if appendID > 0 {
s . txs . add ( appendID )
}
2023-07-26 06:08:16 -07:00
if newChunk == nil { // Sample was appended to existing chunk or is the first sample in a new chunk.
c . maxTime = t
return true , chunkCreated
}
if recoded { // The appender needed to recode the chunk.
c . maxTime = t
c . chunk = newChunk
return true , false
}
2023-07-31 02:10:24 -07:00
s . headChunks = & memChunk {
2023-07-26 06:08:16 -07:00
chunk : newChunk ,
minTime : t ,
maxTime : t ,
2023-07-31 02:10:24 -07:00
prev : s . headChunks ,
2023-07-26 06:08:16 -07:00
}
s . nextAt = rangeForTimestamp ( t , o . chunkRange )
return true , true
2022-12-28 00:55:07 -08:00
}
2023-08-24 06:21:17 -07:00
// appendPreprocessor takes care of cutting new XOR chunks and m-mapping old ones. XOR chunks are cut based on the
// number of samples they contain with a soft cap in bytes.
2021-08-04 23:46:08 -07:00
// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
// This should be called only when appending data.
2023-05-25 13:35:09 -07:00
func ( s * memSeries ) appendPreprocessor ( t int64 , e chunkenc . Encoding , o chunkOpts ) ( c * memChunk , sampleInOrder , chunkCreated bool ) {
2023-08-24 06:21:17 -07:00
// We target chunkenc.MaxBytesPerXORChunk as a hard limit for the size of an XOR chunk. We must determine whether to cut
// a new head chunk without knowing the size of the next sample, however, so we assume the next sample will be a
// maximally-sized sample (19 bytes).
const maxBytesPerXORChunk = chunkenc . MaxBytesPerXORChunk - 19
2023-07-31 02:10:24 -07:00
c = s . headChunks
2021-08-03 05:14:26 -07:00
if c == nil {
if len ( s . mmappedChunks ) > 0 && s . mmappedChunks [ len ( s . mmappedChunks ) - 1 ] . maxTime >= t {
2021-11-17 02:21:27 -08:00
// Out of order sample. Sample timestamp is already in the mmapped chunks, so ignore it.
2021-08-04 23:46:08 -07:00
return c , false , false
2021-08-03 05:14:26 -07:00
}
2022-09-20 10:05:50 -07:00
// There is no head chunk in this series yet, create the first chunk for the sample.
2023-07-31 02:10:24 -07:00
c = s . cutNewHeadChunk ( t , e , o . chunkRange )
2021-08-03 05:14:26 -07:00
chunkCreated = true
}
// Out of order sample.
if c . maxTime >= t {
2021-08-04 23:46:08 -07:00
return c , false , chunkCreated
2021-08-03 05:14:26 -07:00
}
2021-08-11 03:13:17 -07:00
2023-09-20 06:54:00 -07:00
// Unless we just created the chunk, check its size; if the chunk is too large, cut a new one.
if ! chunkCreated && len ( c . chunk . Bytes ( ) ) > maxBytesPerXORChunk {
c = s . cutNewHeadChunk ( t , e , o . chunkRange )
chunkCreated = true
}
2021-11-26 04:13:27 -08:00
if c . chunk . Encoding ( ) != e {
// The chunk encoding expected by this append is different from the head chunk's
// encoding, so we cut a new chunk with the expected encoding.
2023-07-31 02:10:24 -07:00
		c = s.cutNewHeadChunk(t, e, o.chunkRange)
		chunkCreated = true
	}

	numSamples := c.chunk.NumSamples()
	if numSamples == 0 {
		// It could be the new chunk created after reading the chunk snapshot,
		// hence we fix the minTime of the chunk here.
		c.minTime = t
		s.nextAt = rangeForTimestamp(c.minTime, o.chunkRange)
	}

	// If we reach 25% of a chunk's desired sample count, predict an end time
	// for this chunk that will try to make samples equally distributed within
	// the remaining chunks in the current chunk range.
	// At the latest it must happen at the timestamp set when the chunk was cut.
	if numSamples == o.samplesPerChunk/4 {
		s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, s.nextAt, 4)
	}
	// If numSamples > samplesPerChunk*2 then our previous prediction was invalid,
	// most likely because the sample rate has changed and samples are now arriving more frequently.
	// Since we assume that the rate is higher, we're being conservative and cutting at 2*samplesPerChunk
	// as we expect more chunks to come.
	// Note that the next chunk will have its nextAt recalculated for the new rate.
	if t >= s.nextAt || numSamples >= o.samplesPerChunk*2 {
		c = s.cutNewHeadChunk(t, e, o.chunkRange)
		chunkCreated = true
	}

	return c, true, chunkCreated
}
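
// For reference, the chunkOpts value passed around as `o` above bundles the per-append
// chunk settings. This is only a hypothetical sketch based on how `o` is used in this
// file; the real type may carry additional fields.
type chunkOptsSketch struct {
	chunkRange      int64 // Length of a chunk's time range in milliseconds (e.g. a 2h head chunk range).
	samplesPerChunk int   // Target number of samples per full float chunk.
}
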

// histogramsAppendPreprocessor takes care of cutting new histogram chunks and m-mapping old ones. Histogram chunks are
// cut based on their size in bytes.
// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
// This should be called only when appending data.
func (s *memSeries) histogramsAppendPreprocessor(t int64, e chunkenc.Encoding, o chunkOpts) (c *memChunk, sampleInOrder, chunkCreated bool) {
	c = s.headChunks
	if c == nil {
		if len(s.mmappedChunks) > 0 && s.mmappedChunks[len(s.mmappedChunks)-1].maxTime >= t {
			// Out of order sample. Sample timestamp is already in the mmapped chunks, so ignore it.
			return c, false, false
		}
		// There is no head chunk in this series yet, create the first chunk for the sample.
		c = s.cutNewHeadChunk(t, e, o.chunkRange)
		chunkCreated = true
	}

	// Out of order sample.
	if c.maxTime >= t {
		return c, false, chunkCreated
	}

	if c.chunk.Encoding() != e {
		// The chunk encoding expected by this append is different from the head chunk's
		// encoding. So we cut a new chunk with the expected encoding.
		c = s.cutNewHeadChunk(t, e, o.chunkRange)
		chunkCreated = true
	}

	numSamples := c.chunk.NumSamples()
	targetBytes := chunkenc.TargetBytesPerHistogramChunk
	numBytes := len(c.chunk.Bytes())

	if numSamples == 0 {
		// It could be the new chunk created after reading the chunk snapshot,
		// hence we fix the minTime of the chunk here.
		c.minTime = t
		s.nextAt = rangeForTimestamp(c.minTime, o.chunkRange)
	}

	// Below, we will enforce chunkenc.MinSamplesPerHistogramChunk. There are, however, two cases that supersede it:
	//  - The current chunk range is ending before chunkenc.MinSamplesPerHistogramChunk will be satisfied.
	//  - s.nextAt was set while loading a chunk snapshot with the intent that a new chunk be cut on the next append.
	var nextChunkRangeStart int64
	if s.histogramChunkHasComputedEndTime {
		nextChunkRangeStart = rangeForTimestamp(c.minTime, o.chunkRange)
	} else {
		// If we haven't computed an end time yet, s.nextAt is either set to
		// rangeForTimestamp(c.minTime, o.chunkRange) or was set while loading a chunk snapshot. Either way, we want to
		// skip enforcing chunkenc.MinSamplesPerHistogramChunk.
		nextChunkRangeStart = s.nextAt
	}

	// If we reach 25% of a chunk's desired maximum size, predict an end time
	// for this chunk that will try to make samples equally distributed within
	// the remaining chunks in the current chunk range.
	// At the latest it must happen at the timestamp set when the chunk was cut.
	if !s.histogramChunkHasComputedEndTime && numBytes >= targetBytes/4 {
		ratioToFull := float64(targetBytes) / float64(numBytes)
		s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, s.nextAt, ratioToFull)
		s.histogramChunkHasComputedEndTime = true
	}
	// If numBytes > targetBytes*2 then our previous prediction was invalid. This could happen if the sample rate has
	// increased or if the bucket/span count has increased.
	// Note that the next chunk will have its nextAt recalculated for the new rate.
	if (t >= s.nextAt || numBytes >= targetBytes*2) && (numSamples >= chunkenc.MinSamplesPerHistogramChunk || t >= nextChunkRangeStart) {
		c = s.cutNewHeadChunk(t, e, o.chunkRange)
		chunkCreated = true
	}

	// The new chunk will also need a new computed end time.
	if chunkCreated {
		s.histogramChunkHasComputedEndTime = false
	}

	return c, true, chunkCreated
}
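
// A quick, illustrative sketch of the byte-based prediction above. The concrete numbers
// are assumptions: if chunkenc.TargetBytesPerHistogramChunk were 1024 bytes and the 25%
// threshold was crossed at 300 bytes, the ratio would be ~3.41, meaning the chunk is
// expected to hold roughly 3.4x the bytes it currently has before it is considered full.
func histogramRatioToFullSketch(targetBytes, numBytes int) float64 {
	// Same expression as in histogramsAppendPreprocessor; e.g. 1024/300 ≈ 3.41.
	return float64(targetBytes) / float64(numBytes)
}
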

// computeChunkEndTime estimates the end timestamp based on the beginning of a
// chunk, its current timestamp and the upper bound up to which we insert data.
// It assumes that the time range is 1/ratioToFull full.
// Assuming that the samples will keep arriving at the same rate, it will make the
// remaining n chunks within this chunk range (before max) equally sized.
func computeChunkEndTime(start, cur, max int64, ratioToFull float64) int64 {
	n := float64(max-start) / (float64(cur-start+1) * ratioToFull)
	if n <= 1 {
		return max
	}
	return int64(float64(start) + float64(max-start)/math.Floor(n))
}
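
// Worked example with assumed numbers (2h chunk range, 15s scrape interval, prediction
// made by the float path when the chunk holds 30 samples, i.e. ratioToFull=4):
func exampleComputeChunkEndTime() int64 {
	start := int64(0)                // Chunk opened at t=0ms.
	cur := int64(29 * 15000)         // Last appended sample at 435,000ms.
	max := int64(2 * 60 * 60 * 1000) // Upper bound: 7,200,000ms.
	// n = 7,200,000 / (435,001*4) ≈ 4.14, floor(n) = 4, so the predicted end time is
	// start + (max-start)/4 = 1,800,000ms: the range gets split into ~4 equal chunks.
	return computeChunkEndTime(start, cur, max, 4)
}
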
func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange int64) *memChunk {
	// When cutting a new head chunk we create a new memChunk instance with .prev
	// pointing at the current .headChunks, so it forms a linked list.
	// All but the first element of the headChunks list will be m-mapped as soon as possible,
	// so this is a single-element list most of the time.
	s.headChunks = &memChunk{
		minTime: mint,
		maxTime: math.MinInt64,
		prev:    s.headChunks,
	}

	if chunkenc.IsValidEncoding(e) {
		var err error
		s.headChunks.chunk, err = chunkenc.NewEmptyChunk(e)
		if err != nil {
			panic(err) // This should never happen.
		}
	} else {
		s.headChunks.chunk = chunkenc.NewXORChunk()
	}

	// Set upper bound on when the next chunk must be started. An earlier timestamp
	// may be chosen dynamically at a later point.
	s.nextAt = rangeForTimestamp(mint, chunkRange)

	app, err := s.headChunks.chunk.Appender()
	if err != nil {
		panic(err)
	}
	s.app = app

	return s.headChunks
}
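
// The nextAt upper bound set in cutNewHeadChunk comes from rangeForTimestamp, which is
// defined elsewhere in this package. A hedged stand-in for how it is assumed to behave,
// aligning a timestamp to the end of its chunk-range-sized window:
func rangeForTimestampSketch(t, width int64) int64 {
	// Example: width=7,200,000ms (2h) and t=5,400,000ms (01:30) yields 7,200,000ms (02:00),
	// so a chunk cut mid-range can never extend past the range boundary.
	return (t/width)*width + width
}
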
// cutNewOOOHeadChunk cuts a new OOO chunk and m-maps the old chunk.
// The caller must ensure that s.ooo is not nil.
func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) (*oooHeadChunk, chunks.ChunkDiskMapperRef) {
	ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper)

	s.ooo.oooHeadChunk = &oooHeadChunk{
		chunk:   NewOOOChunk(),
		minTime: mint,
		maxTime: math.MinInt64,
	}

	return s.ooo.oooHeadChunk, ref
}

func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) chunks.ChunkDiskMapperRef {
	if s.ooo == nil || s.ooo.oooHeadChunk == nil {
		// There is no head chunk, so nothing to m-map here.
		return 0
	}
	xor, _ := s.ooo.oooHeadChunk.chunk.ToXOR() // Encode to XorChunk which is more compact and implements all of the needed functionality.
	chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, xor, true, handleChunkWriteError)
	s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{
		ref:        chunkRef,
		numSamples: uint16(xor.NumSamples()),
		minTime:    s.ooo.oooHeadChunk.minTime,
		maxTime:    s.ooo.oooHeadChunk.maxTime,
	})
	s.ooo.oooHeadChunk = nil
	return chunkRef
}
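
// Hypothetical illustration of the OOO head chunk lifecycle implied by the two functions
// above (not called anywhere): cutting a new OOO head chunk first m-maps the old one, so
// after the call only a fresh, empty chunk remains in memory. The caller must make sure
// s.ooo is not nil.
func rotateOOOHeadSketch(s *memSeries, mint int64, cdm *chunks.ChunkDiskMapper) {
	// Before: s.ooo.oooHeadChunk holds the out-of-order samples accepted so far.
	newHead, ref := s.cutNewOOOHeadChunk(mint, cdm)
	// After: the previous chunk was re-encoded via ToXOR and recorded in
	// s.ooo.oooMmappedChunks under `ref`; newHead starts empty.
	_, _ = newHead, ref
}
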
// mmapChunks will m-map all but the first chunk on the s.headChunks list.
func (s *memSeries) mmapChunks(chunkDiskMapper *chunks.ChunkDiskMapper) (count int) {
	if s.headChunks == nil || s.headChunks.prev == nil {
		// There is none or only one head chunk, so nothing to m-map here.
		return
	}

	// Write chunks starting from the oldest one and stop before we get to the current s.headChunks.
	// If we have this chain: s.headChunks{t4} -> t3 -> t2 -> t1 -> t0
	// then we need to write chunks t0 to t3, but skip s.headChunks.
	for i := s.headChunks.len() - 1; i > 0; i-- {
		chk := s.headChunks.atOffset(i)
		chunkRef := chunkDiskMapper.WriteChunk(s.ref, chk.minTime, chk.maxTime, chk.chunk, false, handleChunkWriteError)
		s.mmappedChunks = append(s.mmappedChunks, &mmappedChunk{
			ref:        chunkRef,
			numSamples: uint16(chk.chunk.NumSamples()),
			minTime:    chk.minTime,
			maxTime:    chk.maxTime,
		})
		count++
	}

	// Once we've written out all chunks except s.headChunks we need to unlink them from s.headChunks.
	s.headChunks.prev = nil

	return count
}
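
// The change description above says m-mapping now happens on a schedule by walking all
// series. A minimal sketch of such a pass; the seriesList slice and the way it is obtained
// are assumptions, only s.Lock/s.Unlock and s.mmapChunks come from this package.
func mmapAllSeriesSketch(seriesList []*memSeries, cdm *chunks.ChunkDiskMapper) (total int) {
	for _, s := range seriesList {
		s.Lock() // Appends take the same lock, so each series is blocked only briefly.
		total += s.mmapChunks(cdm)
		s.Unlock()
	}
	return total
}
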

func handleChunkWriteError(err error) {
	if err != nil && !errors.Is(err, chunks.ErrChunkDiskMapperClosed) {
		panic(err)
	}
}

// Rollback removes the samples and exemplars from headAppender and writes any series to WAL.
func (a *headAppender) Rollback() (err error) {
	if a.closed {
		return ErrAppenderClosed
	}
	defer func() { a.closed = true }()
	defer a.head.metrics.activeAppenders.Dec()
	defer a.head.iso.closeAppend(a.appendID)
	defer a.head.putSeriesBuffer(a.sampleSeries)

	var series *memSeries
	for i := range a.samples {
		series = a.sampleSeries[i]
		series.Lock()
		series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
		series.pendingCommit = false
		series.Unlock()
	}

	for i := range a.histograms {
		series = a.histogramSeries[i]
		series.Lock()
		series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
		series.pendingCommit = false
		series.Unlock()
	}

	a.head.putAppendBuffer(a.samples)
	a.head.putExemplarBuffer(a.exemplars)
	a.head.putHistogramBuffer(a.histograms)
	a.head.putFloatHistogramBuffer(a.floatHistograms)
	a.head.putMetadataBuffer(a.metadata)
	a.samples = nil
	a.exemplars = nil
	a.histograms = nil
	a.metadata = nil

	// Series are created in the head memory regardless of rollback. Thus we have
	// to log them to the WAL in any case.
	return a.log()
}
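
// For context, Rollback is reached through the storage.Appender interface. A hedged,
// caller-side sketch (error handling trimmed; assumes a *Head value named head):
func appendThenRollbackSketch(ctx context.Context, head *Head) error {
	app := head.Appender(ctx)
	lbls := labels.FromStrings("__name__", "demo_metric")
	if _, err := app.Append(0, lbls, 1690000000000, 42.0); err != nil {
		_ = app.Rollback()
		return err
	}
	// Rollback drops the buffered samples and exemplars, but, as noted above, any series
	// created during the append stay in the Head and are still logged to the WAL.
	return app.Rollback()
}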