// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package scrape

import (
	"bufio"
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"math"
	"net/http"
	"reflect"
	"slices"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/klauspost/compress/gzip"
	config_util "github.com/prometheus/common/config"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/version"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/metadata"
	"github.com/prometheus/prometheus/model/relabel"
	"github.com/prometheus/prometheus/model/textparse"
	"github.com/prometheus/prometheus/model/timestamp"
	"github.com/prometheus/prometheus/model/value"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/util/pool"
)

// ScrapeTimestampTolerance is the tolerance for aligning scrape append
// timestamps, to enable better compression at the TSDB level.
// See https://github.com/prometheus/prometheus/issues/7846
var ScrapeTimestampTolerance = 2 * time.Millisecond

// AlignScrapeTimestamps enables the scrape-timestamp alignment tolerance
// described above.
var AlignScrapeTimestamps = true
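
// The sketch below is illustrative only (not part of the original file): it
// shows how such a tolerance could be applied. If a scrape's wall-clock time
// drifts from the ideal interval-aligned tick by at most
// ScrapeTimestampTolerance, the timestamp is snapped to the tick, so the TSDB
// sees perfectly regular timestamps that delta-encode well. The helper name
// and exact guard are assumptions.
func alignTimestampSketch(scrapeTime, alignedTime time.Time, interval time.Duration) time.Time {
	// Only align when enabled and when the interval dwarfs the tolerance,
	// so alignment can never move a timestamp by a meaningful fraction of
	// the interval.
	if !AlignScrapeTimestamps || interval <= 100*ScrapeTimestampTolerance {
		return scrapeTime
	}
	if d := scrapeTime.Sub(alignedTime); d >= 0 && d <= ScrapeTimestampTolerance {
		return alignedTime
	}
	return scrapeTime
}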

var errNameLabelMandatory = fmt.Errorf("missing metric name (%s label)", labels.MetricName)

// scrapePool manages scrapes for sets of targets.
type scrapePool struct {
	appendable storage.Appendable
	logger     log.Logger
	cancel     context.CancelFunc
	httpOpts   []config_util.HTTPClientOption

	// mtx must not be taken after targetMtx.
	mtx    sync.Mutex
	config *config.ScrapeConfig
	client *http.Client
	loops  map[uint64]loop

	symbolTable           *labels.SymbolTable
	lastSymbolTableCheck  time.Time
	initialSymbolTableLen int

	targetMtx sync.Mutex
	// activeTargets and loops must always be synchronized to have the same
	// set of hashes.
	activeTargets       map[uint64]*Target
	droppedTargets      []*Target // Subject to KeepDroppedTargets limit.
	droppedTargetsCount int       // Count of all dropped targets.

	// Constructor for new scrape loops. This is settable for testing convenience.
	newLoop func(scrapeLoopOptions) loop

	noDefaultPort bool

	metrics *scrapeMetrics

	scrapeFailureLogger    log.Logger
	scrapeFailureLoggerMtx sync.RWMutex
}
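
// A minimal sketch (not part of the original file) of the lock ordering
// documented on mtx above: targetMtx may be acquired while mtx is held, but
// never the reverse, otherwise two goroutines taking the locks in opposite
// orders could deadlock. The method name is hypothetical.
func (sp *scrapePool) activeTargetCountSketch() int {
	sp.mtx.Lock()
	defer sp.mtx.Unlock()
	// Taking targetMtx after mtx is the permitted order.
	sp.targetMtx.Lock()
	defer sp.targetMtx.Unlock()
	return len(sp.activeTargets)
}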

type labelLimits struct {
	labelLimit            int
	labelNameLengthLimit  int
	labelValueLengthLimit int
}
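
// A minimal sketch (not part of the original file) of how these limits could
// be enforced against a scraped label set. The function name is hypothetical;
// a zero limit is treated as "no limit".
func checkLabelLimitsSketch(lset labels.Labels, limits *labelLimits) error {
	if limits == nil {
		return nil
	}
	if limits.labelLimit > 0 && lset.Len() > limits.labelLimit {
		return fmt.Errorf("label count %d exceeds limit %d", lset.Len(), limits.labelLimit)
	}
	// Validate calls the closure for each label and stops at the first error.
	return lset.Validate(func(l labels.Label) error {
		if limits.labelNameLengthLimit > 0 && len(l.Name) > limits.labelNameLengthLimit {
			return fmt.Errorf("length of label name %q exceeds limit %d", l.Name, limits.labelNameLengthLimit)
		}
		if limits.labelValueLengthLimit > 0 && len(l.Value) > limits.labelValueLengthLimit {
			return fmt.Errorf("length of value for label %q exceeds limit %d", l.Name, limits.labelValueLengthLimit)
		}
		return nil
	})
}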

type scrapeLoopOptions struct {
	target                   *Target
	scraper                  scraper
	sampleLimit              int
	bucketLimit              int
	maxSchema                int32
	labelLimits              *labelLimits
	honorLabels              bool
	honorTimestamps          bool
	trackTimestampsStaleness bool
	interval                 time.Duration
	timeout                  time.Duration
	scrapeClassicHistograms  bool
	validationScheme         model.ValidationScheme

	mrc               []*relabel.Config
	cache             *scrapeCache
	enableCompression bool
}

const maxAheadTime = 10 * time.Minute

// A labelsMutator rewrites a label set; returning an empty label set is
// interpreted as "drop".
type labelsMutator func(labels.Labels) labels.Labels
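
// As an illustrative example (not part of the original file), a mutator that
// drops every series in a hypothetical "internal_" metric namespace by
// returning the empty label set:
var dropInternalMetricsSketch labelsMutator = func(l labels.Labels) labels.Labels {
	if strings.HasPrefix(l.Get(labels.MetricName), "internal_") {
		return labels.EmptyLabels()
	}
	return l
}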

func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger log.Logger, buffers *pool.Pool, options *Options, metrics *scrapeMetrics) (*scrapePool, error) {
	if logger == nil {
		logger = log.NewNopLogger()
	}

	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, options.HTTPClientOptions...)
	if err != nil {
		return nil, fmt.Errorf("error creating HTTP client: %w", err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	sp := &scrapePool{
		cancel:               cancel,
		appendable:           app,
		config:               cfg,
		client:               client,
		activeTargets:        map[uint64]*Target{},
		loops:                map[uint64]loop{},
		symbolTable:          labels.NewSymbolTable(),
		lastSymbolTableCheck: time.Now(),
		logger:               logger,
		metrics:              metrics,
		httpOpts:             options.HTTPClientOptions,
		noDefaultPort:        options.NoDefaultPort,
	}
	sp.newLoop = func(opts scrapeLoopOptions) loop {
		// Update the targets retrieval function for metadata to a new scrape cache.
		cache := opts.cache
		if cache == nil {
			cache = newScrapeCache(metrics)
		}
		opts.target.SetMetadataStore(cache)

		return newScrapeLoop(
			ctx,
			opts.scraper,
			log.With(logger, "target", opts.target),
			buffers,
			func(l labels.Labels) labels.Labels {
				return mutateSampleLabels(l, opts.target, opts.honorLabels, opts.mrc)
			},
			func(l labels.Labels) labels.Labels { return mutateReportSampleLabels(l, opts.target) },
			func(ctx context.Context) storage.Appender { return app.Appender(ctx) },
			cache,
			sp.symbolTable,
			offsetSeed,
			opts.honorTimestamps,
			opts.trackTimestampsStaleness,
			opts.enableCompression,
			opts.sampleLimit,
			opts.bucketLimit,
			opts.maxSchema,
			opts.labelLimits,
			opts.interval,
			opts.timeout,
			opts.scrapeClassicHistograms,
			options.EnableNativeHistogramsIngestion,
			options.EnableCreatedTimestampZeroIngestion,
			options.ExtraMetrics,
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Signed-off-by: Augustin Husson <augustin.husson@amadeus.com>
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Signed-off-by: Marco Pracucci <marco@pracucci.com>
Signed-off-by: tyltr <tylitianrui@126.com>
Signed-off-by: Ted Robertson 10043369+tredondo@users.noreply.github.com
Signed-off-by: Ivan Babrou <github@ivan.computer>
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
Signed-off-by: Israel Blancas <iblancasa@gmail.com>
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Signed-off-by: Goutham <gouthamve@gmail.com>
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
Signed-off-by: Ben Ye <benye@amazon.com>
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
Signed-off-by: SuperQ <superq@gmail.com>
Signed-off-by: Ben Kochie <superq@gmail.com>
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
Signed-off-by: beorn7 <beorn@grafana.com>
Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
Signed-off-by: Yury Moladau <yurymolodov@gmail.com>
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Signed-off-by: Mikhail Fesenko <proggga@gmail.com>
Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com>
Signed-off-by: Alan Protasio <alanprot@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
Co-authored-by: Julian Wiedmann <jwi@linux.ibm.com>
Co-authored-by: Bryan Boreham <bjboreham@gmail.com>
Co-authored-by: Erik Sommer <ersotech@posteo.de>
Co-authored-by: Linas Medziunas <linas.medziunas@gmail.com>
Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
Co-authored-by: Arianna Vespri <arianna.vespri@yahoo.it>
Co-authored-by: machine424 <ayoubmrini424@gmail.com>
Co-authored-by: daniel-resdiary <109083091+daniel-resdiary@users.noreply.github.com>
Co-authored-by: Daniel Kerbel <nmdanny@gmail.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Jan Fajerski <jfajersk@redhat.com>
Co-authored-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Co-authored-by: Marc Tudurí <marctc@protonmail.com>
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Co-authored-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Co-authored-by: Augustin Husson <husson.augustin@gmail.com>
Co-authored-by: Björn Rabenstein <beorn@grafana.com>
Co-authored-by: zenador <zenador@users.noreply.github.com>
Co-authored-by: gotjosh <josue.abreu@gmail.com>
Co-authored-by: Ben Kochie <superq@gmail.com>
Co-authored-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Co-authored-by: Marco Pracucci <marco@pracucci.com>
Co-authored-by: tyltr <tylitianrui@126.com>
Co-authored-by: Ted Robertson <10043369+tredondo@users.noreply.github.com>
Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu>
Co-authored-by: Matthias Loibl <mail@matthiasloibl.com>
Co-authored-by: Ivan Babrou <github@ivan.computer>
Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com>
Co-authored-by: Israel Blancas <iblancasa@gmail.com>
Co-authored-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: Goutham <gouthamve@gmail.com>
Co-authored-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Co-authored-by: Chris Marchbanks <csmarchbanks@gmail.com>
Co-authored-by: Ben Ye <benye@amazon.com>
Co-authored-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Co-authored-by: Paulin Todev <paulin.todev@gmail.com>
Co-authored-by: Filip Petkovski <filip.petkovsky@gmail.com>
Co-authored-by: Yury Molodov <yurymolodov@gmail.com>
Co-authored-by: Danny Kopping <danny.kopping@grafana.com>
Co-authored-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com>
Co-authored-by: Mikhail Fesenko <proggga@gmail.com>
Co-authored-by: Alan Protasio <alanprot@gmail.com>
* remote write 2.0 - follow up improvements (#13478)
* move remote write proto version config from a remote storage config to a
per remote write configuration option
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* rename scrape config for metadata, fix 2.0 header var name/value (was
1.1), and more clean up
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* address review comments, mostly lint fixes
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* another lint fix
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* lint imports
Signed-off-by: Callum Styan <callumstyan@gmail.com>
---------
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* go mod tidy
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* Added commmentary to RW 2.0 protocol for easier adoption and explicit semantics. (#13502)
* Added commmentary to RW 2.0 protocol for easier adoption and explicit semantics.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Apply suggestions from code review
Co-authored-by: Nico Pazos <32206519+npazosmendez@users.noreply.github.com>
Signed-off-by: Callum Styan <callumstyan@gmail.com>
---------
Signed-off-by: bwplotka <bwplotka@gmail.com>
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Co-authored-by: Callum Styan <callumstyan@gmail.com>
Co-authored-by: Nico Pazos <32206519+npazosmendez@users.noreply.github.com>
* prw2.0: Added support for "custom" layouts for native histogram proto (#13558)
* prw2.0: Added support for "custom" layouts for native histogram.
Result of the discussions:
* https://github.com/prometheus/prometheus/issues/13475#issuecomment-1931496924
* https://cloud-native.slack.com/archives/C02KR205UMU/p1707301006347199
Signed-off-by: bwplotka <bwplotka@gmail.com>
* prw2.0: Added support for "custom" layouts for native histogram.
Result of the discussions:
* https://github.com/prometheus/prometheus/issues/13475#issuecomment-1931496924
* https://cloud-native.slack.com/archives/C02KR205UMU/p1707301006347199
Signed-off-by: bwplotka <bwplotka@gmail.com>
# Conflicts:
# prompb/write/v2/types.pb.go
* Update prompb/write/v2/types.proto
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
* Addressed comments, fixed test.
Signed-off-by: bwplotka <bwplotka@gmail.com>
---------
Signed-off-by: bwplotka <bwplotka@gmail.com>
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
* first draft of content negotiation
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* Lint
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* Fix race in test
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* Fix another test race
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* Almost done with lint
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* Fix todos around 405 HEAD handling
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* Changes based on review comments
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* Update storage/remote/client.go
Co-authored-by: Bartlomiej Plotka <bwplotka@gmail.com>
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* Latest updates to review comments
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* latest tweaks
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* remote write 2.0 - content negotiation remediation (#13921)
* Consolidate renegotiation error into one, fix tests
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* fix metric name and actuall increment counter
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
---------
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
* Fixes after main sync.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* [PRW 2.0] Moved rw2 proto to the full path (both package name and placement) (#13973)
undefined
* [PRW2.0] Remove benchmark scripts (#13949)
See rationales on https://docs.google.com/document/d/1Bpf7mYjrHUhPHkie0qlnZFxzgqf_L32kM8ZOknSdJrU/edit
Signed-off-by: bwplotka <bwplotka@gmail.com>
* rw20: Update prw commentary after Callum spec review (#14136)
* rw20: Update prw commentary after Callum spec review
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
* Update types.proto
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
---------
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
* [PRW 2.0] Updated spec proto (2.0-rc.1); deterministic v1 interop; to be sympathetic with implementation. (#14330)
* [PRW 2.0] Updated spec proto (2.0-rc.1); deterministic v1 interop; to be sympathetic with implementation.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* update custom marshalling
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Removed confusing comments.
Signed-off-by: bwplotka <bwplotka@gmail.com>
---------
Signed-off-by: bwplotka <bwplotka@gmail.com>
* [PRW-2.0] (chain1) New Remote Write 2.0 Config options for 2.0-rc.1 spec. (#14335)
NOTE: For simple review this change does not touch remote/ packages, only main and configs.
Spec: https://prometheus.io/docs/specs/remote_write_spec_2_0
Supersedes https://github.com/prometheus/prometheus/pull/13968
Signed-off-by: bwplotka <bwplotka@gmail.com>
* [PRW-2.0] (part 2) Removed automatic negotiation, updates for the latest spec semantics in remote pkg (#14329)
* [PRW-2.0] (part2) Moved to latest basic negotiation & spec semantics.
Spec: https://github.com/prometheus/docs/pull/2462
Supersedes https://github.com/prometheus/prometheus/pull/13968
Signed-off-by: bwplotka <bwplotka@gmail.com>
# Conflicts:
# config/config.go
# docs/configuration/configuration.md
# storage/remote/queue_manager_test.go
# storage/remote/write.go
# web/api/v1/api.go
* Addressed comments.
Signed-off-by: bwplotka <bwplotka@gmail.com>
---------
Signed-off-by: bwplotka <bwplotka@gmail.com>
* lint
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* storage/remote tests: refactor: extract function newTestQueueManager
To reduce repetition.
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* use newTestQueueManager for test
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* go mod tidy
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
* [PRW 2.0] (part3) moved type specific conversions to prompb and writev2 codecs.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Added test for rwProtoMsgFlagParser; fixed TODO comment.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Renamed DecodeV2WriteRequestStr to DecodeWriteV2Request (with tests).
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Addressed comments on remote_storage example, updated it for 2.0
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Fixed `--enable-feature=metadata-wal-records` docs and error when using PRW 2.0 without it.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Addressed Callum comments on custom*.go
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Added TODO to genproto.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Addressed Callum comments in remote pkg.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Added metadata validation to write handler test; fixed ToMetadata.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Addressed rest of Callum comments.
Signed-off-by: bwplotka <bwplotka@gmail.com>
* Fixed writev2.FromMetadataType (was wrongly using prompb).
Signed-off-by: bwplotka <bwplotka@gmail.com>
* fix a few import whitespaces
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* add a default case with an error to the example RW receiver
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* more minor import whitespace chagnes
Signed-off-by: Callum Styan <callumstyan@gmail.com>
* Apply suggestions from code review
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
* Update storage/remote/queue_manager_test.go
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
---------
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: bwplotka <bwplotka@gmail.com>
Signed-off-by: Paschalis Tsilias <paschalist0@gmail.com>
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
Signed-off-by: Erik Sommer <ersotech@posteo.de>
Signed-off-by: Linas Medziunas <linas.medziunas@gmail.com>
Signed-off-by: Arianna Vespri <arianna.vespri@yahoo.it>
Signed-off-by: machine424 <ayoubmrini424@gmail.com>
Signed-off-by: Daniel Nicholls <daniel.nicholls@resdiary.com>
Signed-off-by: Daniel Kerbel <nmdanny@gmail.com>
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
Signed-off-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
Signed-off-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Signed-off-by: Marc Tuduri <marctc@protonmail.com>
Signed-off-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Signed-off-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Signed-off-by: Augustin Husson <augustin.husson@amadeus.com>
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
Signed-off-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Signed-off-by: Marco Pracucci <marco@pracucci.com>
Signed-off-by: tyltr <tylitianrui@126.com>
Signed-off-by: Ted Robertson 10043369+tredondo@users.noreply.github.com
Signed-off-by: Ivan Babrou <github@ivan.computer>
Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
Signed-off-by: Israel Blancas <iblancasa@gmail.com>
Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Signed-off-by: Björn Rabenstein <github@rabenste.in>
Signed-off-by: Goutham <gouthamve@gmail.com>
Signed-off-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Signed-off-by: Chris Marchbanks <csmarchbanks@gmail.com>
Signed-off-by: Ben Ye <benye@amazon.com>
Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
Signed-off-by: SuperQ <superq@gmail.com>
Signed-off-by: Ben Kochie <superq@gmail.com>
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Signed-off-by: Paulin Todev <paulin.todev@gmail.com>
Signed-off-by: Filip Petkovski <filip.petkovsky@gmail.com>
Signed-off-by: beorn7 <beorn@grafana.com>
Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
Signed-off-by: Yury Moladau <yurymolodov@gmail.com>
Signed-off-by: Danny Kopping <danny.kopping@grafana.com>
Signed-off-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Signed-off-by: Mikhail Fesenko <proggga@gmail.com>
Signed-off-by: Jesus Vazquez <jesusvzpg@gmail.com>
Signed-off-by: Alan Protasio <alanprot@gmail.com>
Signed-off-by: Alex Greenbank <alex.greenbank@grafana.com>
Co-authored-by: Nicolás Pazos <32206519+npazosmendez@users.noreply.github.com>
Co-authored-by: Callum Styan <callumstyan@gmail.com>
Co-authored-by: Nicolás Pazos <npazosmendez@gmail.com>
Co-authored-by: alexgreenbank <alex.greenbank@grafana.com>
Co-authored-by: Marco Pracucci <marco@pracucci.com>
Co-authored-by: Paschalis Tsilias <paschalist0@gmail.com>
Co-authored-by: Julian Wiedmann <jwi@linux.ibm.com>
Co-authored-by: Bryan Boreham <bjboreham@gmail.com>
Co-authored-by: Erik Sommer <ersotech@posteo.de>
Co-authored-by: Linas Medziunas <linas.medziunas@gmail.com>
Co-authored-by: Arianna Vespri <arianna.vespri@yahoo.it>
Co-authored-by: machine424 <ayoubmrini424@gmail.com>
Co-authored-by: daniel-resdiary <109083091+daniel-resdiary@users.noreply.github.com>
Co-authored-by: Daniel Kerbel <nmdanny@gmail.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Jan Fajerski <jfajersk@redhat.com>
Co-authored-by: Kevin Mingtarja <kevin.mingtarja@gmail.com>
Co-authored-by: Paschalis Tsilias <tpaschalis@users.noreply.github.com>
Co-authored-by: Marc Tudurí <marctc@protonmail.com>
Co-authored-by: Paschalis Tsilias <paschalis.tsilias@grafana.com>
Co-authored-by: Giedrius Statkevičius <giedrius.statkevicius@vinted.com>
Co-authored-by: Augustin Husson <husson.augustin@gmail.com>
Co-authored-by: Björn Rabenstein <beorn@grafana.com>
Co-authored-by: zenador <zenador@users.noreply.github.com>
Co-authored-by: gotjosh <josue.abreu@gmail.com>
Co-authored-by: Ben Kochie <superq@gmail.com>
Co-authored-by: Kumar Kalpadiptya Roy <kalpadiptya.roy@outlook.com>
Co-authored-by: tyltr <tylitianrui@126.com>
Co-authored-by: Ted Robertson <10043369+tredondo@users.noreply.github.com>
Co-authored-by: Julien Pivotto <roidelapluie@o11y.eu>
Co-authored-by: Matthias Loibl <mail@matthiasloibl.com>
Co-authored-by: Ivan Babrou <github@ivan.computer>
Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com>
Co-authored-by: Israel Blancas <iblancasa@gmail.com>
Co-authored-by: Ziqi Zhao <zhaoziqi9146@gmail.com>
Co-authored-by: George Krajcsovits <krajorama@users.noreply.github.com>
Co-authored-by: Björn Rabenstein <github@rabenste.in>
Co-authored-by: Goutham <gouthamve@gmail.com>
Co-authored-by: Rewanth Tammana <22347290+rewanthtammana@users.noreply.github.com>
Co-authored-by: Chris Marchbanks <csmarchbanks@gmail.com>
Co-authored-by: Ben Ye <benye@amazon.com>
Co-authored-by: Oleg Zaytsev <mail@olegzaytsev.com>
Co-authored-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Co-authored-by: Paulin Todev <paulin.todev@gmail.com>
Co-authored-by: Filip Petkovski <filip.petkovsky@gmail.com>
Co-authored-by: Yury Molodov <yurymolodov@gmail.com>
Co-authored-by: Danny Kopping <danny.kopping@grafana.com>
Co-authored-by: Leegin <114397475+Leegin-darknight@users.noreply.github.com>
Co-authored-by: Guillermo Sanchez Gavier <gsanchez@newrelic.com>
Co-authored-by: Mikhail Fesenko <proggga@gmail.com>
Co-authored-by: Alan Protasio <alanprot@gmail.com>
2024-07-04 14:29:20 -07:00
|
|
|
            options.AppendMetadata,
            opts.target,
            options.PassMetadataInContext,
            metrics,
            options.skipOffsetting,
            opts.validationScheme,
        )
    }

    sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit))

    return sp, nil
}
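
// ActiveTargets returns a snapshot of the pool's currently active targets.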
func (sp *scrapePool) ActiveTargets() []*Target {
    sp.targetMtx.Lock()
    defer sp.targetMtx.Unlock()

    var tActive []*Target
    for _, t := range sp.activeTargets {
        tActive = append(tActive, t)
    }
    return tActive
}

// DroppedTargets returns the dropped targets, subject to the KeepDroppedTargets limit.
func (sp *scrapePool) DroppedTargets() []*Target {
    sp.targetMtx.Lock()
    defer sp.targetMtx.Unlock()
    return sp.droppedTargets
}
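
// DroppedTargetsCount returns the number of all dropped targets, including
// those exceeding the KeepDroppedTargets limit.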
func (sp *scrapePool) DroppedTargetsCount() int {
    sp.targetMtx.Lock()
    defer sp.targetMtx.Unlock()
    return sp.droppedTargetsCount
}
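
// SetScrapeFailureLogger sets the logger for recording scrape failures,
// stamped with the pool's job name, and propagates it to all running scrape
// loops. Passing nil disables scrape-failure logging.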
func (sp *scrapePool) SetScrapeFailureLogger(l log.Logger) {
    sp.scrapeFailureLoggerMtx.Lock()
    defer sp.scrapeFailureLoggerMtx.Unlock()
    if l != nil {
        l = log.With(l, "job_name", sp.config.JobName)
    }
    sp.scrapeFailureLogger = l

    sp.targetMtx.Lock()
    defer sp.targetMtx.Unlock()
    for _, s := range sp.loops {
        s.setScrapeFailureLogger(sp.scrapeFailureLogger)
    }
}
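
// getScrapeFailureLogger returns the pool's scrape failure logger under the
// read lock.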
func (sp *scrapePool) getScrapeFailureLogger() log.Logger {
    sp.scrapeFailureLoggerMtx.RLock()
    defer sp.scrapeFailureLoggerMtx.RUnlock()
    return sp.scrapeFailureLogger
}

// stop terminates all scrape loops and returns after they all terminated.
func (sp *scrapePool) stop() {
    sp.mtx.Lock()
    defer sp.mtx.Unlock()
    sp.cancel()
    var wg sync.WaitGroup

    sp.targetMtx.Lock()

    for fp, l := range sp.loops {
        wg.Add(1)

        go func(l loop) {
            l.stop()
            wg.Done()
        }(l)

        delete(sp.loops, fp)
        delete(sp.activeTargets, fp)
    }

    sp.targetMtx.Unlock()

    wg.Wait()
    sp.client.CloseIdleConnections()

    if sp.config != nil {
        sp.metrics.targetScrapePoolSyncsCounter.DeleteLabelValues(sp.config.JobName)
        sp.metrics.targetScrapePoolTargetLimit.DeleteLabelValues(sp.config.JobName)
        sp.metrics.targetScrapePoolTargetsAdded.DeleteLabelValues(sp.config.JobName)
        sp.metrics.targetScrapePoolSymbolTableItems.DeleteLabelValues(sp.config.JobName)
        sp.metrics.targetSyncIntervalLength.DeleteLabelValues(sp.config.JobName)
        sp.metrics.targetSyncFailed.DeleteLabelValues(sp.config.JobName)
    }
}

// reload the scrape pool with the given scrape configuration. The target state is preserved
// but all scrape loops are restarted with the new scrape configuration.
// This method returns after all scrape loops that were stopped have stopped scraping.
func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
    sp.mtx.Lock()
    defer sp.mtx.Unlock()
    sp.metrics.targetScrapePoolReloads.Inc()
    start := time.Now()

    client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, sp.httpOpts...)
    if err != nil {
        sp.metrics.targetScrapePoolReloadsFailed.Inc()
        return fmt.Errorf("error creating HTTP client: %w", err)
    }

    reuseCache := reusableCache(sp.config, cfg)
    sp.config = cfg
    oldClient := sp.client
    sp.client = client

    sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit))

    sp.restartLoops(reuseCache)
    oldClient.CloseIdleConnections()
    sp.metrics.targetReloadIntervalLength.WithLabelValues(time.Duration(sp.config.ScrapeInterval).String()).Observe(
        time.Since(start).Seconds(),
    )
    return nil
}
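
// restartLoops stops every running scrape loop and starts a replacement with
// the pool's current configuration. When reuseCache is true, each new loop
// inherits the scrape cache of the loop it replaces.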
func (sp *scrapePool) restartLoops(reuseCache bool) {
    var (
        wg            sync.WaitGroup
        interval      = time.Duration(sp.config.ScrapeInterval)
        timeout       = time.Duration(sp.config.ScrapeTimeout)
        bodySizeLimit = int64(sp.config.BodySizeLimit)
        sampleLimit   = int(sp.config.SampleLimit)
        bucketLimit   = int(sp.config.NativeHistogramBucketLimit)
        maxSchema     = pickSchema(sp.config.NativeHistogramMinBucketFactor)
        labelLimits   = &labelLimits{
            labelLimit:            int(sp.config.LabelLimit),
            labelNameLengthLimit:  int(sp.config.LabelNameLengthLimit),
            labelValueLengthLimit: int(sp.config.LabelValueLengthLimit),
        }
        honorLabels              = sp.config.HonorLabels
        honorTimestamps          = sp.config.HonorTimestamps
        enableCompression        = sp.config.EnableCompression
        trackTimestampsStaleness = sp.config.TrackTimestampsStaleness
        mrc                      = sp.config.MetricRelabelConfigs
    )

    validationScheme := model.UTF8Validation
    if sp.config.MetricNameValidationScheme == config.LegacyValidationConfig {
        validationScheme = model.LegacyValidation
    }

    sp.targetMtx.Lock()

    forcedErr := sp.refreshTargetLimitErr()
    for fp, oldLoop := range sp.loops {
        var cache *scrapeCache
        if oc := oldLoop.getCache(); reuseCache && oc != nil {
            oldLoop.disableEndOfRunStalenessMarkers()
            cache = oc
        } else {
            cache = newScrapeCache(sp.metrics)
        }

        t := sp.activeTargets[fp]
        interval, timeout, err := t.intervalAndTimeout(interval, timeout)
        var (
            s = &targetScraper{
                Target:               t,
                client:               sp.client,
                timeout:              timeout,
                bodySizeLimit:        bodySizeLimit,
                acceptHeader:         acceptHeader(sp.config.ScrapeProtocols, validationScheme),
                acceptEncodingHeader: acceptEncodingHeader(enableCompression),
            }
            newLoop = sp.newLoop(scrapeLoopOptions{
                target:                   t,
                scraper:                  s,
                sampleLimit:              sampleLimit,
                bucketLimit:              bucketLimit,
                maxSchema:                maxSchema,
                labelLimits:              labelLimits,
                honorLabels:              honorLabels,
                honorTimestamps:          honorTimestamps,
                enableCompression:        enableCompression,
                trackTimestampsStaleness: trackTimestampsStaleness,
                mrc:                      mrc,
                cache:                    cache,
                interval:                 interval,
                timeout:                  timeout,
                validationScheme:         validationScheme,
            })
        )
        if err != nil {
            newLoop.setForcedError(err)
        }
        wg.Add(1)

        go func(oldLoop, newLoop loop) {
            oldLoop.stop()
            wg.Done()

            newLoop.setForcedError(forcedErr)
            newLoop.setScrapeFailureLogger(sp.getScrapeFailureLogger())
            newLoop.run(nil)
        }(oldLoop, newLoop)

        sp.loops[fp] = newLoop
    }

    sp.targetMtx.Unlock()

    wg.Wait()
}

// Must be called with sp.mtx held.
func (sp *scrapePool) checkSymbolTable() {
    // Here we take steps to clear out the symbol table if it has grown a lot.
    // After waiting some time for things to settle, we take the size of the symbol-table.
    // If, after some more time, the table has grown to twice that size, we start a new one.
    const minTimeToCleanSymbolTable = 5 * time.Minute
    if time.Since(sp.lastSymbolTableCheck) > minTimeToCleanSymbolTable {
        if sp.initialSymbolTableLen == 0 {
            sp.initialSymbolTableLen = sp.symbolTable.Len()
        } else if sp.symbolTable.Len() > 2*sp.initialSymbolTableLen {
            sp.symbolTable = labels.NewSymbolTable()
            sp.initialSymbolTableLen = 0
            sp.restartLoops(false) // To drop all caches.
        }
        sp.lastSymbolTableCheck = time.Now()
    }
}
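
// Illustrative walk-through of the heuristic above: if the symbol table
// settles at 10,000 entries on the first check, a later check that finds more
// than 20,000 entries swaps in a fresh labels.NewSymbolTable() and restarts
// all loops without cache reuse, so references to the old table are dropped.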

// Sync converts target groups into actual scrape targets and synchronizes
// the currently running scrape loops with the resulting set. Dropped targets
// are recorded on the pool, subject to the KeepDroppedTargets limit.
func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
    sp.mtx.Lock()
    defer sp.mtx.Unlock()
    start := time.Now()

    sp.targetMtx.Lock()
    var all []*Target
    var targets []*Target
    lb := labels.NewBuilderWithSymbolTable(sp.symbolTable)
    sp.droppedTargets = []*Target{}
    sp.droppedTargetsCount = 0
    for _, tg := range tgs {
        targets, failures := TargetsFromGroup(tg, sp.config, sp.noDefaultPort, targets, lb)
        for _, err := range failures {
            level.Error(sp.logger).Log("msg", "Creating target failed", "err", err)
        }
        sp.metrics.targetSyncFailed.WithLabelValues(sp.config.JobName).Add(float64(len(failures)))
        for _, t := range targets {
            // Replicate .Labels().IsEmpty() with a loop here to avoid generating garbage.
            nonEmpty := false
            t.LabelsRange(func(l labels.Label) { nonEmpty = true })
            switch {
            case nonEmpty:
                all = append(all, t)
            case !t.discoveredLabels.IsEmpty():
                if sp.config.KeepDroppedTargets == 0 || uint(len(sp.droppedTargets)) < sp.config.KeepDroppedTargets {
                    sp.droppedTargets = append(sp.droppedTargets, t)
                }
                sp.droppedTargetsCount++
            }
        }
    }
    sp.metrics.targetScrapePoolSymbolTableItems.WithLabelValues(sp.config.JobName).Set(float64(sp.symbolTable.Len()))
    sp.targetMtx.Unlock()
    sp.sync(all)
    sp.checkSymbolTable()

    sp.metrics.targetSyncIntervalLength.WithLabelValues(sp.config.JobName).Observe(
        time.Since(start).Seconds(),
    )
    sp.metrics.targetScrapePoolSyncsCounter.WithLabelValues(sp.config.JobName).Inc()
}

// sync takes a list of potentially duplicated targets, deduplicates them, starts
// scrape loops for new targets, and stops scrape loops for disappeared targets.
// It returns after all stopped scrape loops terminated.
func (sp *scrapePool) sync(targets []*Target) {
    var (
        uniqueLoops   = make(map[uint64]loop)
        interval      = time.Duration(sp.config.ScrapeInterval)
        timeout       = time.Duration(sp.config.ScrapeTimeout)
        bodySizeLimit = int64(sp.config.BodySizeLimit)
        sampleLimit   = int(sp.config.SampleLimit)
        bucketLimit   = int(sp.config.NativeHistogramBucketLimit)
        maxSchema     = pickSchema(sp.config.NativeHistogramMinBucketFactor)
        labelLimits   = &labelLimits{
            labelLimit:            int(sp.config.LabelLimit),
            labelNameLengthLimit:  int(sp.config.LabelNameLengthLimit),
            labelValueLengthLimit: int(sp.config.LabelValueLengthLimit),
        }
        honorLabels              = sp.config.HonorLabels
        honorTimestamps          = sp.config.HonorTimestamps
        enableCompression        = sp.config.EnableCompression
        trackTimestampsStaleness = sp.config.TrackTimestampsStaleness
        mrc                      = sp.config.MetricRelabelConfigs
        scrapeClassicHistograms  = sp.config.ScrapeClassicHistograms
    )

    validationScheme := model.UTF8Validation
    if sp.config.MetricNameValidationScheme == config.LegacyValidationConfig {
        validationScheme = model.LegacyValidation
    }

    sp.targetMtx.Lock()
    for _, t := range targets {
        hash := t.hash()

        if _, ok := sp.activeTargets[hash]; !ok {
            // The scrape interval and timeout labels are set to the config's values initially,
            // so whether changed via relabeling or not, they'll exist and hold the correct values
            // for every target.
            var err error
            interval, timeout, err = t.intervalAndTimeout(interval, timeout)
            s := &targetScraper{
                Target:               t,
                client:               sp.client,
                timeout:              timeout,
                bodySizeLimit:        bodySizeLimit,
                acceptHeader:         acceptHeader(sp.config.ScrapeProtocols, validationScheme),
                acceptEncodingHeader: acceptEncodingHeader(enableCompression),
                metrics:              sp.metrics,
            }
            l := sp.newLoop(scrapeLoopOptions{
                target:                   t,
                scraper:                  s,
                sampleLimit:              sampleLimit,
                bucketLimit:              bucketLimit,
                maxSchema:                maxSchema,
                labelLimits:              labelLimits,
                honorLabels:              honorLabels,
                honorTimestamps:          honorTimestamps,
                enableCompression:        enableCompression,
                trackTimestampsStaleness: trackTimestampsStaleness,
                mrc:                      mrc,
                interval:                 interval,
                timeout:                  timeout,
                scrapeClassicHistograms:  scrapeClassicHistograms,
            })
            if err != nil {
                l.setForcedError(err)
            }
            l.setScrapeFailureLogger(sp.scrapeFailureLogger)

            sp.activeTargets[hash] = t
            sp.loops[hash] = l

            uniqueLoops[hash] = l
        } else {
            // This might be a duplicated target.
            if _, ok := uniqueLoops[hash]; !ok {
                uniqueLoops[hash] = nil
            }
            // Need to keep the most updated labels information
            // for displaying it in the Service Discovery web page.
            sp.activeTargets[hash].SetDiscoveredLabels(t.DiscoveredLabels())
        }
    }

    var wg sync.WaitGroup

    // Stop and remove old targets and scraper loops.
    for hash := range sp.activeTargets {
        if _, ok := uniqueLoops[hash]; !ok {
            wg.Add(1)
            go func(l loop) {
                l.stop()
                wg.Done()
            }(sp.loops[hash])

            delete(sp.loops, hash)
            delete(sp.activeTargets, hash)
        }
    }

    sp.targetMtx.Unlock()

    sp.metrics.targetScrapePoolTargetsAdded.WithLabelValues(sp.config.JobName).Set(float64(len(uniqueLoops)))
    forcedErr := sp.refreshTargetLimitErr()
    for _, l := range sp.loops {
        l.setForcedError(forcedErr)
    }
    for _, l := range uniqueLoops {
        if l != nil {
            go l.run(nil)
        }
    }
    // Wait for all potentially stopped scrapers to terminate.
    // This covers the case of flapping targets. If the server is under high load, a new scraper
    // may be active and tries to insert. The old scraper that didn't terminate yet could still
    // be inserting a previous sample set.
    wg.Wait()
}

// refreshTargetLimitErr returns an error that can be passed to the scrape loops
// if the number of targets exceeds the configured limit.
func (sp *scrapePool) refreshTargetLimitErr() error {
    if sp.config == nil || sp.config.TargetLimit == 0 {
        return nil
    }
    if l := len(sp.activeTargets); l > int(sp.config.TargetLimit) {
        sp.metrics.targetScrapePoolExceededTargetLimit.Inc()
        return fmt.Errorf("target_limit exceeded (number of targets: %d, limit: %d)", l, sp.config.TargetLimit)
    }
    return nil
}
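
// verifyLabelLimits returns an error if the given label set violates any of
// the configured label count or length limits; a nil limits pointer disables
// all checks.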
func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
    if limits == nil {
        return nil
    }

    met := lset.Get(labels.MetricName)
    if limits.labelLimit > 0 {
        nbLabels := lset.Len()
        if nbLabels > limits.labelLimit {
            return fmt.Errorf("label_limit exceeded (metric: %.50s, number of labels: %d, limit: %d)", met, nbLabels, limits.labelLimit)
        }
    }

    if limits.labelNameLengthLimit == 0 && limits.labelValueLengthLimit == 0 {
        return nil
    }

    return lset.Validate(func(l labels.Label) error {
        if limits.labelNameLengthLimit > 0 {
            nameLength := len(l.Name)
            if nameLength > limits.labelNameLengthLimit {
                return fmt.Errorf("label_name_length_limit exceeded (metric: %.50s, label name: %.50s, length: %d, limit: %d)", met, l.Name, nameLength, limits.labelNameLengthLimit)
            }
        }

        if limits.labelValueLengthLimit > 0 {
            valueLength := len(l.Value)
            if valueLength > limits.labelValueLengthLimit {
                return fmt.Errorf("label_value_length_limit exceeded (metric: %.50s, label name: %.50s, value: %.50q, length: %d, limit: %d)", met, l.Name, l.Value, valueLength, limits.labelValueLengthLimit)
            }
        }
        return nil
    })
}
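
// mutateSampleLabels merges the target's labels into a scraped sample's label
// set. With honor set, labels already exposed by the target win; otherwise the
// target labels win and conflicting exposed values are preserved under an
// "exported_"-prefixed name (see resolveConflictingExposedLabels). Metric
// relabeling rules in rc are applied last.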
func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*relabel.Config) labels.Labels {
    lb := labels.NewBuilder(lset)

    if honor {
        target.LabelsRange(func(l labels.Label) {
            if !lset.Has(l.Name) {
                lb.Set(l.Name, l.Value)
            }
        })
    } else {
        var conflictingExposedLabels []labels.Label
        target.LabelsRange(func(l labels.Label) {
            existingValue := lset.Get(l.Name)
            if existingValue != "" {
                conflictingExposedLabels = append(conflictingExposedLabels, labels.Label{Name: l.Name, Value: existingValue})
            }
            // It is now safe to set the target label.
            lb.Set(l.Name, l.Value)
        })

        if len(conflictingExposedLabels) > 0 {
            resolveConflictingExposedLabels(lb, conflictingExposedLabels)
        }
    }

    res := lb.Labels()

    if len(rc) > 0 {
        res, _ = relabel.Process(res, rc...)
    }

    return res
}
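
// resolveConflictingExposedLabels renames exposed labels that clash with
// target labels by prepending model.ExportedLabelPrefix ("exported_") until
// the name is unused, handling shorter names first. For example, an exposed
// job="api" colliding with the target's own job label is stored as
// exported_job="api"; if exported_job is itself taken, it becomes
// exported_exported_job, and so on.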
func resolveConflictingExposedLabels(lb *labels.Builder, conflictingExposedLabels []labels.Label) {
    slices.SortStableFunc(conflictingExposedLabels, func(a, b labels.Label) int {
        return len(a.Name) - len(b.Name)
    })

    for _, l := range conflictingExposedLabels {
        newName := l.Name
        for {
            newName = model.ExportedLabelPrefix + newName
            if lb.Get(newName) == "" {
                lb.Set(newName, l.Value)
                break
            }
        }
    }
}
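
// mutateReportSampleLabels builds the label set for synthetic report samples
// (e.g. the "up" series): target labels always win, and any exposed value is
// kept under an "exported_"-prefixed name.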
func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels {
    lb := labels.NewBuilder(lset)

    target.LabelsRange(func(l labels.Label) {
        lb.Set(model.ExportedLabelPrefix+l.Name, lset.Get(l.Name))
        lb.Set(l.Name, l.Value)
    })

    return lb.Labels()
}

// appender returns an appender for ingested samples from the target.
func appender(app storage.Appender, sampleLimit, bucketLimit int, maxSchema int32) storage.Appender {
    app = &timeLimitAppender{
        Appender: app,
        maxTime:  timestamp.FromTime(time.Now().Add(maxAheadTime)),
    }

    // The sampleLimit is applied after metrics are potentially dropped via relabeling.
    if sampleLimit > 0 {
        app = &limitAppender{
            Appender: app,
            limit:    sampleLimit,
        }
    }

    if bucketLimit > 0 {
        app = &bucketLimitAppender{
            Appender: app,
            limit:    bucketLimit,
        }
    }

    if maxSchema < histogram.ExponentialSchemaMax {
        app = &maxSchemaAppender{
            Appender:  app,
            maxSchema: maxSchema,
        }
    }

    return app
}
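
// Note: the wrappers above nest outermost-last, so a sample appended to the
// returned appender flows maxSchemaAppender -> bucketLimitAppender ->
// limitAppender -> timeLimitAppender before reaching the underlying appender;
// limits are therefore enforced in the reverse order of construction.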

// A scraper retrieves samples and accepts a status report at the end.
type scraper interface {
    scrape(ctx context.Context) (*http.Response, error)
    readResponse(ctx context.Context, resp *http.Response, w io.Writer) (string, error)
    Report(start time.Time, dur time.Duration, err error)
    offset(interval time.Duration, offsetSeed uint64) time.Duration
}

// targetScraper implements the scraper interface for a target.
type targetScraper struct {
    *Target

    client  *http.Client
    req     *http.Request
    timeout time.Duration

    gzipr *gzip.Reader
    buf   *bufio.Reader

    bodySizeLimit        int64
    acceptHeader         string
    acceptEncodingHeader string

    metrics *scrapeMetrics
}

var errBodySizeLimit = errors.New("body size limit exceeded")

// acceptHeader transforms the protocol preferences from the options into specific
// header values as https://www.rfc-editor.org/rfc/rfc9110.html#name-accept defines.
// No validation happens here; we expect scrape protocols to be validated already.
func acceptHeader(sps []config.ScrapeProtocol, scheme model.ValidationScheme) string {
    var vals []string
    weight := len(config.ScrapeProtocolsHeaders) + 1
    for _, sp := range sps {
        val := config.ScrapeProtocolsHeaders[sp]
        if scheme == model.UTF8Validation {
            val += ";" + config.UTF8NamesHeader
        }
        val += fmt.Sprintf(";q=0.%d", weight)
        vals = append(vals, val)
        weight--
    }
    // Default match anything.
    vals = append(vals, fmt.Sprintf("*/*;q=0.%d", weight))
    return strings.Join(vals, ",")
}
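
// Illustrative result (hedged: the concrete media-type strings live in
// config.ScrapeProtocolsHeaders, and the starting q-value depends on that
// map's size): a config preferring OpenMetrics 1.0.0 over plain text might
// produce an Accept header along the lines of
//
//     application/openmetrics-text;version=1.0.0;q=0.6,text/plain;version=0.0.4;q=0.5,*/*;q=0.4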

func acceptEncodingHeader(enableCompression bool) string {
    if enableCompression {
        return "gzip"
    }
    return "identity"
}
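
// UserAgent is the User-Agent header value sent with every scrape request.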
var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)

func (s *targetScraper) scrape(ctx context.Context) (*http.Response, error) {
    if s.req == nil {
        req, err := http.NewRequest(http.MethodGet, s.URL().String(), nil)
        if err != nil {
            return nil, err
        }
        req.Header.Add("Accept", s.acceptHeader)
        req.Header.Add("Accept-Encoding", s.acceptEncodingHeader)
        req.Header.Set("User-Agent", UserAgent)
        req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", strconv.FormatFloat(s.timeout.Seconds(), 'f', -1, 64))

        s.req = req
    }

    return s.client.Do(s.req.WithContext(ctx))
}
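
// readResponse drains the scrape response body into w, transparently handling
// gzip Content-Encoding and enforcing bodySizeLimit. It returns the response's
// Content-Type header value.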

func (s *targetScraper) readResponse(ctx context.Context, resp *http.Response, w io.Writer) (string, error) {
	defer func() {
		io.Copy(io.Discard, resp.Body)
		resp.Body.Close()
	}()

	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("server returned HTTP status %s", resp.Status)
	}

	if s.bodySizeLimit <= 0 {
		s.bodySizeLimit = math.MaxInt64
	}
	if resp.Header.Get("Content-Encoding") != "gzip" {
		n, err := io.Copy(w, io.LimitReader(resp.Body, s.bodySizeLimit))
		if err != nil {
			return "", err
		}
		if n >= s.bodySizeLimit {
			s.metrics.targetScrapeExceededBodySizeLimit.Inc()
			return "", errBodySizeLimit
		}
		return resp.Header.Get("Content-Type"), nil
	}

	if s.gzipr == nil {
		s.buf = bufio.NewReader(resp.Body)
		var err error
		s.gzipr, err = gzip.NewReader(s.buf)
		if err != nil {
			return "", err
		}
	} else {
		s.buf.Reset(resp.Body)
		if err := s.gzipr.Reset(s.buf); err != nil {
			return "", err
		}
	}

	n, err := io.Copy(w, io.LimitReader(s.gzipr, s.bodySizeLimit))
	s.gzipr.Close()
	if err != nil {
		return "", err
	}
	if n >= s.bodySizeLimit {
		s.metrics.targetScrapeExceededBodySizeLimit.Inc()
		return "", errBodySizeLimit
	}
	return resp.Header.Get("Content-Type"), nil
}
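
// Editorial note, not part of the original source: io.LimitReader yields at
// most bodySizeLimit bytes, so a copy that reaches exactly the limit is
// indistinguishable from a truncated one. The n >= s.bodySizeLimit checks
// above therefore deliberately treat "at the limit" as "over the limit" and
// fail the scrape with errBodySizeLimit.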

// A loop can run and be stopped again. It must not be reused after it was stopped.
type loop interface {
	run(errc chan<- error)
	setForcedError(err error)
	setScrapeFailureLogger(log.Logger)
	stop()
	getCache() *scrapeCache
	disableEndOfRunStalenessMarkers()
}

type cacheEntry struct {
	ref      storage.SeriesRef
	lastIter uint64
	hash     uint64
	lset     labels.Labels
}

type scrapeLoop struct {
	scraper                  scraper
	l                        log.Logger
	scrapeFailureLogger      log.Logger
	scrapeFailureLoggerMtx   sync.RWMutex
	cache                    *scrapeCache
	lastScrapeSize           int
	buffers                  *pool.Pool
	offsetSeed               uint64
	honorTimestamps          bool
	trackTimestampsStaleness bool
	enableCompression        bool
	forcedErr                error
	forcedErrMtx             sync.Mutex
	sampleLimit              int
	bucketLimit              int
	maxSchema                int32
	labelLimits              *labelLimits
	interval                 time.Duration
	timeout                  time.Duration
	scrapeClassicHistograms  bool
	validationScheme         model.ValidationScheme

	// Feature flagged options.
	enableNativeHistogramIngestion bool
	enableCTZeroIngestion          bool

	appender            func(ctx context.Context) storage.Appender
	symbolTable         *labels.SymbolTable
	sampleMutator       labelsMutator
	reportSampleMutator labelsMutator

	parentCtx   context.Context
	appenderCtx context.Context
	ctx         context.Context
	cancel      func()
	stopped     chan struct{}

	disabledEndOfRunStalenessMarkers bool

	reportExtraMetrics  bool
	appendMetadataToWAL bool

	metrics *scrapeMetrics

	skipOffsetting bool // For testability.
}

// scrapeCache tracks mappings of exposed metric strings to label sets and
// storage references. Additionally, it tracks staleness of series between
// scrapes.
type scrapeCache struct {
	iter uint64 // Current scrape iteration.

	// How many series and metadata entries there were at the last success.
	successfulCount int

	// Parsed string to an entry with information about the actual label set
	// and its storage reference.
	series map[string]*cacheEntry

	// Cache of dropped metric strings and their iteration. The iteration must
	// be a pointer so we can update it.
	droppedSeries map[string]*uint64

	// seriesCur and seriesPrev store the labels of series that were seen
	// in the current and previous scrape.
	// We hold two maps and swap them out to save allocations.
	seriesCur  map[uint64]labels.Labels
	seriesPrev map[uint64]labels.Labels

	metaMtx  sync.Mutex
	metadata map[string]*metaEntry

	metrics *scrapeMetrics
}

// metaEntry holds meta information about a metric.
type metaEntry struct {
	metadata.Metadata

	lastIter       uint64 // Last scrape iteration the entry was observed at.
	lastIterChange uint64 // Last scrape iteration the entry was changed at.
}

func (m *metaEntry) size() int {
	// The attribute lastIter, although part of the struct, is not metadata.
	return len(m.Help) + len(m.Unit) + len(m.Type)
}

func newScrapeCache(metrics *scrapeMetrics) *scrapeCache {
	return &scrapeCache{
		series:        map[string]*cacheEntry{},
		droppedSeries: map[string]*uint64{},
		seriesCur:     map[uint64]labels.Labels{},
		seriesPrev:    map[uint64]labels.Labels{},
		metadata:      map[string]*metaEntry{},
		metrics:       metrics,
	}
}

func (c *scrapeCache) iterDone(flushCache bool) {
	c.metaMtx.Lock()
	count := len(c.series) + len(c.droppedSeries) + len(c.metadata)
	c.metaMtx.Unlock()

	switch {
	case flushCache:
		c.successfulCount = count
	case count > c.successfulCount*2+1000:
		// If a target had varying labels in scrapes that ultimately failed,
		// the caches would grow indefinitely. Force a flush when this happens.
		// We use the heuristic that this is a doubling of the cache size
		// since the last scrape, and allow an additional 1000 in case
		// initial scrapes all fail.
		flushCache = true
		c.metrics.targetScrapeCacheFlushForced.Inc()
	}

	if flushCache {
		// All caches may grow over time through series churn
		// or multiple string representations of the same metric. Clean up entries
		// that haven't appeared in the last scrape.
		for s, e := range c.series {
			if c.iter != e.lastIter {
				delete(c.series, s)
			}
		}
		for s, iter := range c.droppedSeries {
			if c.iter != *iter {
				delete(c.droppedSeries, s)
			}
		}
		c.metaMtx.Lock()
		for m, e := range c.metadata {
			// Keep metadata around for 10 scrapes after its metric disappeared.
			if c.iter-e.lastIter > 10 {
				delete(c.metadata, m)
			}
		}
		c.metaMtx.Unlock()

		c.iter++
	}

	// Swap current and previous series.
	c.seriesPrev, c.seriesCur = c.seriesCur, c.seriesPrev

	// We have to delete every single key in the map.
	for k := range c.seriesCur {
		delete(c.seriesCur, k)
	}
}
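
// Editorial note, not part of the original source: a worked instance of the
// forced-flush heuristic above. If the last successful scrape left
// successfulCount = 500 entries, the caches are force-flushed once
// count > 2*500+1000 = 2000, i.e. only after they have at least doubled,
// with 1000 entries of slack so that repeatedly failing initial scrapes
// (successfulCount == 0) do not flush on every iteration.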

func (c *scrapeCache) get(met []byte) (*cacheEntry, bool, bool) {
	e, ok := c.series[string(met)]
	if !ok {
		return nil, false, false
	}
	alreadyScraped := e.lastIter == c.iter
	e.lastIter = c.iter
	return e, true, alreadyScraped
}

func (c *scrapeCache) addRef(met []byte, ref storage.SeriesRef, lset labels.Labels, hash uint64) {
	if ref == 0 {
		return
	}
	c.series[string(met)] = &cacheEntry{ref: ref, lastIter: c.iter, lset: lset, hash: hash}
}

func (c *scrapeCache) addDropped(met []byte) {
	iter := c.iter
	c.droppedSeries[string(met)] = &iter
}

func (c *scrapeCache) getDropped(met []byte) bool {
	iterp, ok := c.droppedSeries[string(met)]
	if ok {
		*iterp = c.iter
	}
	return ok
}

func (c *scrapeCache) trackStaleness(hash uint64, lset labels.Labels) {
	c.seriesCur[hash] = lset
}

func (c *scrapeCache) forEachStale(f func(labels.Labels) bool) {
	for h, lset := range c.seriesPrev {
		if _, ok := c.seriesCur[h]; !ok {
			if !f(lset) {
				break
			}
		}
	}
}
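
// Editorial note, not part of the original source, with hypothetical values:
// if the previous scrape recorded hashes {h1, h2} in seriesPrev and the
// current scrape only tracked h1 in seriesCur, forEachStale invokes f with
// h2's label set, which the caller uses to emit a staleness marker for the
// disappeared series.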

func (c *scrapeCache) setType(metric []byte, t model.MetricType) {
	c.metaMtx.Lock()

	e, ok := c.metadata[string(metric)]
	if !ok {
		e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}}
		c.metadata[string(metric)] = e
	}
	if e.Type != t {
		e.Type = t
		e.lastIterChange = c.iter
	}
	e.lastIter = c.iter

	c.metaMtx.Unlock()
}

func (c *scrapeCache) setHelp(metric, help []byte) {
	c.metaMtx.Lock()

	e, ok := c.metadata[string(metric)]
	if !ok {
		e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}}
		c.metadata[string(metric)] = e
	}
	if e.Help != string(help) {
		e.Help = string(help)
		e.lastIterChange = c.iter
	}
	e.lastIter = c.iter

	c.metaMtx.Unlock()
}

func (c *scrapeCache) setUnit(metric, unit []byte) {
	c.metaMtx.Lock()

	e, ok := c.metadata[string(metric)]
	if !ok {
		e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}}
		c.metadata[string(metric)] = e
	}
	if e.Unit != string(unit) {
		e.Unit = string(unit)
		e.lastIterChange = c.iter
	}
	e.lastIter = c.iter

	c.metaMtx.Unlock()
}

func (c *scrapeCache) GetMetadata(metric string) (MetricMetadata, bool) {
	c.metaMtx.Lock()
	defer c.metaMtx.Unlock()

	m, ok := c.metadata[metric]
	if !ok {
		return MetricMetadata{}, false
	}
	return MetricMetadata{
		Metric: metric,
		Type:   m.Type,
		Help:   m.Help,
		Unit:   m.Unit,
	}, true
}

func (c *scrapeCache) ListMetadata() []MetricMetadata {
	c.metaMtx.Lock()
	defer c.metaMtx.Unlock()

	res := make([]MetricMetadata, 0, len(c.metadata))

	for m, e := range c.metadata {
		res = append(res, MetricMetadata{
			Metric: m,
			Type:   e.Type,
			Help:   e.Help,
			Unit:   e.Unit,
		})
	}
	return res
}

// SizeMetadata returns the size of the metadata cache.
func (c *scrapeCache) SizeMetadata() (s int) {
	c.metaMtx.Lock()
	defer c.metaMtx.Unlock()
	for _, e := range c.metadata {
		s += e.size()
	}

	return s
}

// LengthMetadata returns the number of metadata entries in the cache.
func (c *scrapeCache) LengthMetadata() int {
	c.metaMtx.Lock()
	defer c.metaMtx.Unlock()

	return len(c.metadata)
}

func newScrapeLoop(ctx context.Context,
	sc scraper,
	l log.Logger,
	buffers *pool.Pool,
	sampleMutator labelsMutator,
	reportSampleMutator labelsMutator,
	appender func(ctx context.Context) storage.Appender,
	cache *scrapeCache,
	symbolTable *labels.SymbolTable,
	offsetSeed uint64,
	honorTimestamps bool,
	trackTimestampsStaleness bool,
	enableCompression bool,
	sampleLimit int,
	bucketLimit int,
	maxSchema int32,
	labelLimits *labelLimits,
	interval time.Duration,
	timeout time.Duration,
	scrapeClassicHistograms bool,
	enableNativeHistogramIngestion bool,
	enableCTZeroIngestion bool,
	reportExtraMetrics bool,
	appendMetadataToWAL bool,
	target *Target,
	passMetadataInContext bool,
	metrics *scrapeMetrics,
	skipOffsetting bool,
	validationScheme model.ValidationScheme,
) *scrapeLoop {
	if l == nil {
		l = log.NewNopLogger()
	}
	if buffers == nil {
		buffers = pool.New(1e3, 1e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) })
	}
	if cache == nil {
		cache = newScrapeCache(metrics)
	}

	appenderCtx := ctx

	if passMetadataInContext {
		// Store the cache and target in the context. This is then used by downstream OTel Collector
		// to look up the metadata required to process the samples. Not used by Prometheus itself.
		// TODO(gouthamve) We're using a dedicated context because using the parentCtx caused a memory
		// leak. We should ideally fix the main leak. See: https://github.com/prometheus/prometheus/pull/10590
		appenderCtx = ContextWithMetricMetadataStore(appenderCtx, cache)
		appenderCtx = ContextWithTarget(appenderCtx, target)
	}

	sl := &scrapeLoop{
		scraper:                        sc,
		buffers:                        buffers,
		cache:                          cache,
		appender:                       appender,
		symbolTable:                    symbolTable,
		sampleMutator:                  sampleMutator,
		reportSampleMutator:            reportSampleMutator,
		stopped:                        make(chan struct{}),
		offsetSeed:                     offsetSeed,
		l:                              l,
		parentCtx:                      ctx,
		appenderCtx:                    appenderCtx,
		honorTimestamps:                honorTimestamps,
		trackTimestampsStaleness:       trackTimestampsStaleness,
		enableCompression:              enableCompression,
		sampleLimit:                    sampleLimit,
		bucketLimit:                    bucketLimit,
		maxSchema:                      maxSchema,
		labelLimits:                    labelLimits,
		interval:                       interval,
		timeout:                        timeout,
		scrapeClassicHistograms:        scrapeClassicHistograms,
		enableNativeHistogramIngestion: enableNativeHistogramIngestion,
		enableCTZeroIngestion:          enableCTZeroIngestion,
		reportExtraMetrics:             reportExtraMetrics,
		appendMetadataToWAL:            appendMetadataToWAL,
		metrics:                        metrics,
		skipOffsetting:                 skipOffsetting,
		validationScheme:               validationScheme,
	}
	sl.ctx, sl.cancel = context.WithCancel(ctx)

	return sl
}

func (sl *scrapeLoop) setScrapeFailureLogger(l log.Logger) {
	sl.scrapeFailureLoggerMtx.Lock()
	defer sl.scrapeFailureLoggerMtx.Unlock()
	if ts, ok := sl.scraper.(fmt.Stringer); ok && l != nil {
		l = log.With(l, "target", ts.String())
	}
	sl.scrapeFailureLogger = l
}

func (sl *scrapeLoop) run(errc chan<- error) {
	if !sl.skipOffsetting {
		select {
		case <-time.After(sl.scraper.offset(sl.interval, sl.offsetSeed)):
			// Continue after a scraping offset.
		case <-sl.ctx.Done():
			close(sl.stopped)
			return
		}
	}

	var last time.Time

	alignedScrapeTime := time.Now().Round(0)
	ticker := time.NewTicker(sl.interval)
	defer ticker.Stop()

mainLoop:
	for {
		select {
		case <-sl.parentCtx.Done():
			close(sl.stopped)
			return
		case <-sl.ctx.Done():
			break mainLoop
		default:
		}

		// Temporary workaround for a jitter in go timers that causes disk space
		// increase in TSDB.
		// See https://github.com/prometheus/prometheus/issues/7846
		// Calling Round ensures the time used is the wall clock, as otherwise .Sub
		// and .Add on time.Time behave differently (see time package docs).
		scrapeTime := time.Now().Round(0)
		if AlignScrapeTimestamps {
			// Tolerance is clamped to maximum 1% of the scrape interval.
			tolerance := min(sl.interval/100, ScrapeTimestampTolerance)
			// For some reason, a tick might have been skipped, in which case we
			// would call alignedScrapeTime.Add(interval) multiple times.
			for scrapeTime.Sub(alignedScrapeTime) >= sl.interval {
				alignedScrapeTime = alignedScrapeTime.Add(sl.interval)
			}
			// Align the scrape time if we are in the tolerance boundaries.
			if scrapeTime.Sub(alignedScrapeTime) <= tolerance {
				scrapeTime = alignedScrapeTime
			}
		}

		last = sl.scrapeAndReport(last, scrapeTime, errc)

		select {
		case <-sl.parentCtx.Done():
			close(sl.stopped)
			return
		case <-sl.ctx.Done():
			break mainLoop
		case <-ticker.C:
		}
	}

	close(sl.stopped)

	if !sl.disabledEndOfRunStalenessMarkers {
		sl.endOfRunStaleness(last, ticker, sl.interval)
	}
}
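
// Editorial note, not part of the original source, hypothetical numbers: with
// a 15s interval the alignment tolerance above is min(150ms,
// ScrapeTimestampTolerance). A tick firing at t0+15.004s lies within the
// tolerance of the aligned t0+15s, so the sample is stamped with the aligned
// time; constant timestamp deltas are what keep TSDB compression effective
// (see the issue referenced above).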

// scrapeAndReport performs a scrape and then appends the result to the storage
// together with reporting metrics, by using as few appenders as possible.
// In the happy scenario, a single appender is used.
// This function uses sl.appenderCtx instead of sl.ctx on purpose. A scrape should
// only be cancelled on shutdown, not on reloads.
func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- error) time.Time {
	start := time.Now()

	// Only record after the first scrape.
	if !last.IsZero() {
		sl.metrics.targetIntervalLength.WithLabelValues(sl.interval.String()).Observe(
			time.Since(last).Seconds(),
		)
	}

	var total, added, seriesAdded, bytesRead int
	var err, appErr, scrapeErr error

	app := sl.appender(sl.appenderCtx)
	defer func() {
		if err != nil {
			app.Rollback()
			return
		}
		err = app.Commit()
		if err != nil {
			level.Error(sl.l).Log("msg", "Scrape commit failed", "err", err)
		}
	}()

	defer func() {
		if err = sl.report(app, appendTime, time.Since(start), total, added, seriesAdded, bytesRead, scrapeErr); err != nil {
			level.Warn(sl.l).Log("msg", "Appending scrape report failed", "err", err)
		}
	}()

	if forcedErr := sl.getForcedError(); forcedErr != nil {
		scrapeErr = forcedErr
		// Add stale markers.
		if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil {
			app.Rollback()
			app = sl.appender(sl.appenderCtx)
			level.Warn(sl.l).Log("msg", "Append failed", "err", err)
		}
		if errc != nil {
			errc <- forcedErr
		}

		return start
	}

	var contentType string
	var resp *http.Response
	var b []byte
	var buf *bytes.Buffer
	scrapeCtx, cancel := context.WithTimeout(sl.parentCtx, sl.timeout)
	resp, scrapeErr = sl.scraper.scrape(scrapeCtx)
	if scrapeErr == nil {
		b = sl.buffers.Get(sl.lastScrapeSize).([]byte)
		defer sl.buffers.Put(b)
		buf = bytes.NewBuffer(b)
		contentType, scrapeErr = sl.scraper.readResponse(scrapeCtx, resp, buf)
	}
	cancel()

	if scrapeErr == nil {
		b = buf.Bytes()
		// NOTE: There were issues with misbehaving clients in the past
		// that occasionally returned empty results. We don't want those
		// to falsely reset our buffer size.
		if len(b) > 0 {
			sl.lastScrapeSize = len(b)
		}
		bytesRead = len(b)
	} else {
		level.Debug(sl.l).Log("msg", "Scrape failed", "err", scrapeErr)
		sl.scrapeFailureLoggerMtx.RLock()
		if sl.scrapeFailureLogger != nil {
			sl.scrapeFailureLogger.Log("err", scrapeErr)
		}
		sl.scrapeFailureLoggerMtx.RUnlock()
		if errc != nil {
			errc <- scrapeErr
		}
		if errors.Is(scrapeErr, errBodySizeLimit) {
			bytesRead = -1
		}
	}

	// A failed scrape is the same as an empty scrape,
	// we still call sl.append to trigger stale markers.
	total, added, seriesAdded, appErr = sl.append(app, b, contentType, appendTime)
	if appErr != nil {
		app.Rollback()
		app = sl.appender(sl.appenderCtx)
		level.Debug(sl.l).Log("msg", "Append failed", "err", appErr)
		// The append failed, probably due to a parse error or sample limit.
		// Call sl.append again with an empty scrape to trigger stale markers.
		if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil {
			app.Rollback()
			app = sl.appender(sl.appenderCtx)
			level.Warn(sl.l).Log("msg", "Append failed", "err", err)
		}
	}

	if scrapeErr == nil {
		scrapeErr = appErr
	}

	return start
}
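
// Editorial note, not part of the original source: the two deferred functions
// in scrapeAndReport run in LIFO order, so the report samples are appended
// first and the Commit/Rollback decision runs last; a failure while appending
// the report therefore still reaches the commit defer through err.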

func (sl *scrapeLoop) setForcedError(err error) {
	sl.forcedErrMtx.Lock()
	defer sl.forcedErrMtx.Unlock()
	sl.forcedErr = err
}

func (sl *scrapeLoop) getForcedError() error {
	sl.forcedErrMtx.Lock()
	defer sl.forcedErrMtx.Unlock()
	return sl.forcedErr
}

func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, interval time.Duration) {
	// Scraping has stopped. We want to write stale markers but
	// the target may be recreated, so we wait just over 2 scrape intervals
	// before creating them.
	// If the context is canceled, we presume the server is shutting down
	// and will restart where it was. We do not attempt to write stale markers
	// in this case.

	if last.IsZero() {
		// There never was a scrape, so there will be no stale markers.
		return
	}

	// Wait for when the next scrape would have been, record its timestamp.
	var staleTime time.Time
	select {
	case <-sl.parentCtx.Done():
		return
	case <-ticker.C:
		staleTime = time.Now()
	}

	// Wait for when the next scrape would have been; if the target was recreated,
	// samples should have been ingested by now.
	select {
	case <-sl.parentCtx.Done():
		return
	case <-ticker.C:
	}

	// Wait for an extra 10% of the interval, just to be safe.
	select {
	case <-sl.parentCtx.Done():
		return
	case <-time.After(interval / 10):
	}

	// Call sl.append again with an empty scrape to trigger stale markers.
	// If the target has since been recreated and scraped, the
	// stale markers will be out of order and ignored.
	// sl.context would have been cancelled, hence using sl.appenderCtx.
	app := sl.appender(sl.appenderCtx)
	var err error
	defer func() {
		if err != nil {
			app.Rollback()
			return
		}
		err = app.Commit()
		if err != nil {
			level.Warn(sl.l).Log("msg", "Stale commit failed", "err", err)
		}
	}()
	if _, _, _, err = sl.append(app, []byte{}, "", staleTime); err != nil {
		app.Rollback()
		app = sl.appender(sl.appenderCtx)
		level.Warn(sl.l).Log("msg", "Stale append failed", "err", err)
	}
	if err = sl.reportStale(app, staleTime); err != nil {
		level.Warn(sl.l).Log("msg", "Stale report failed", "err", err)
	}
}
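
// Editorial note, not part of the original source, hypothetical numbers: with
// a 15s interval the waits above add up to two ticker periods plus
// interval/10, so stale markers land roughly 31.5s after the final scrape,
// giving a recreated target two full intervals to resume ingestion first.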

// Stop the scraping. May still write data and stale markers after it has
// returned. Cancel the context to stop all writes.
func (sl *scrapeLoop) stop() {
	sl.cancel()
	<-sl.stopped
}

func (sl *scrapeLoop) disableEndOfRunStalenessMarkers() {
	sl.disabledEndOfRunStalenessMarkers = true
}

func (sl *scrapeLoop) getCache() *scrapeCache {
	return sl.cache
}

type appendErrors struct {
	numOutOfOrder         int
	numDuplicates         int
	numOutOfBounds        int
	numExemplarOutOfOrder int
}

func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) {
	p, err := textparse.New(b, contentType, sl.scrapeClassicHistograms, sl.symbolTable)
	if err != nil {
		level.Debug(sl.l).Log(
			"msg", "Invalid content type on scrape, using prometheus parser as fallback.",
			"content_type", contentType,
			"err", err,
		)
	}

	var (
		defTime         = timestamp.FromTime(ts)
		appErrs         = appendErrors{}
		sampleLimitErr  error
		bucketLimitErr  error
		lset            labels.Labels     // escapes to heap so hoisted out of loop
		e               exemplar.Exemplar // escapes to heap so hoisted out of loop
		meta            metadata.Metadata
		metadataChanged bool
	)

	exemplars := make([]exemplar.Exemplar, 1)

	// updateMetadata updates the current iteration's metadata object and the
	// metadataChanged value if we have metadata in the scrape cache AND the
	// labelset is for a new series or the metadata for this series has just
	// changed. It returns a boolean based on whether the metadata was updated.
	updateMetadata := func(lset labels.Labels, isNewSeries bool) bool {
		if !sl.appendMetadataToWAL {
			return false
		}

		sl.cache.metaMtx.Lock()
		defer sl.cache.metaMtx.Unlock()
		metaEntry, metaOk := sl.cache.metadata[lset.Get(labels.MetricName)]
		if metaOk && (isNewSeries || metaEntry.lastIterChange == sl.cache.iter) {
			metadataChanged = true
			meta.Type = metaEntry.Type
			meta.Unit = metaEntry.Unit
			meta.Help = metaEntry.Help
			return true
		}
		return false
	}

	// Take an appender with limits.
	app = appender(app, sl.sampleLimit, sl.bucketLimit, sl.maxSchema)

	defer func() {
		if err != nil {
			return
		}
		// Only perform cache cleaning if the scrape was not empty.
		// An empty scrape (usually) is used to indicate a failed scrape.
		sl.cache.iterDone(len(b) > 0)
	}()

loop:
	for {
		var (
			et                       textparse.Entry
			sampleAdded, isHistogram bool
			met                      []byte
			parsedTimestamp          *int64
			val                      float64
			h                        *histogram.Histogram
			fh                       *histogram.FloatHistogram
		)
		if et, err = p.Next(); err != nil {
			if errors.Is(err, io.EOF) {
				err = nil
			}
			break
		}
		switch et {
		case textparse.EntryType:
			sl.cache.setType(p.Type())
			continue
		case textparse.EntryHelp:
			sl.cache.setHelp(p.Help())
			continue
		case textparse.EntryUnit:
			sl.cache.setUnit(p.Unit())
			continue
		case textparse.EntryComment:
			continue
		case textparse.EntryHistogram:
			isHistogram = true
		default:
		}
		total++

		t := defTime
		if isHistogram {
			met, parsedTimestamp, h, fh = p.Histogram()
		} else {
			met, parsedTimestamp, val = p.Series()
		}
		if !sl.honorTimestamps {
			parsedTimestamp = nil
		}
		if parsedTimestamp != nil {
			t = *parsedTimestamp
		}

		// Zero metadata out for current iteration until it's resolved.
		meta = metadata.Metadata{}
		metadataChanged = false

		if sl.cache.getDropped(met) {
			continue
		}
		ce, ok, seriesAlreadyScraped := sl.cache.get(met)
		var (
			ref  storage.SeriesRef
			hash uint64
		)

		if ok {
			ref = ce.ref
			lset = ce.lset
			hash = ce.hash

			// Update metadata only if it changed in the current iteration.
			updateMetadata(lset, false)
		} else {
			p.Metric(&lset)
			hash = lset.Hash()

			// Hash label set as it is seen local to the target. Then add target labels
			// and relabeling and store the final label set.
			lset = sl.sampleMutator(lset)

			// The label set may be set to empty to indicate dropping.
			if lset.IsEmpty() {
				sl.cache.addDropped(met)
				continue
			}

			if !lset.Has(labels.MetricName) {
				err = errNameLabelMandatory
				break loop
			}
			if !lset.IsValid(sl.validationScheme) {
				err = fmt.Errorf("invalid metric name or label names: %s", lset.String())
				break loop
			}

			// If any label limit is exceeded, the scrape should fail.
			if err = verifyLabelLimits(lset, sl.labelLimits); err != nil {
				sl.metrics.targetScrapePoolExceededLabelLimits.Inc()
				break loop
			}

			// Append metadata for new series if they were present.
			updateMetadata(lset, true)
		}

		if seriesAlreadyScraped && parsedTimestamp == nil {
			err = storage.ErrDuplicateSampleForTimestamp
		} else {
			if sl.enableCTZeroIngestion {
				if ctMs := p.CreatedTimestamp(); ctMs != nil {
					ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs)
					if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now.
						// CT is an experimental feature. For now, we don't need to fail the
						// scrape on errors updating the created timestamp, log debug.
						level.Debug(sl.l).Log("msg", "Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err)
					}
				}
			}

			if isHistogram && sl.enableNativeHistogramIngestion {
				if h != nil {
					ref, err = app.AppendHistogram(ref, lset, t, h, nil)
				} else {
					ref, err = app.AppendHistogram(ref, lset, t, nil, fh)
				}
			} else {
				ref, err = app.Append(ref, lset, t, val)
			}
		}

		if err == nil {
			if (parsedTimestamp == nil || sl.trackTimestampsStaleness) && ce != nil {
				sl.cache.trackStaleness(ce.hash, ce.lset)
			}
		}

		sampleAdded, err = sl.checkAddError(met, err, &sampleLimitErr, &bucketLimitErr, &appErrs)
		if err != nil {
			if !errors.Is(err, storage.ErrNotFound) {
				level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err)
			}
			break loop
		}

		if !ok {
			if parsedTimestamp == nil || sl.trackTimestampsStaleness {
				// Bypass staleness logic if there is an explicit timestamp.
				sl.cache.trackStaleness(hash, lset)
			}
			sl.cache.addRef(met, ref, lset, hash)
			if sampleAdded && sampleLimitErr == nil && bucketLimitErr == nil {
				seriesAdded++
			}
		}

		// Increment added even if there's an error so we correctly report the
		// number of samples remaining after relabeling.
		// We still report duplicated samples here since this number should be the exact number
		// of time series exposed on a scrape after relabelling.
		added++
		exemplars = exemplars[:0] // Reset and reuse the exemplar slice.
		for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) {
			if !e.HasTs {
				if isHistogram {
					// We drop exemplars for native histograms if they don't have a timestamp.
					// Missing timestamps are deliberately not supported as we want to start
					// enforcing timestamps for exemplars as otherwise proper deduplication
					// is inefficient and purely based on heuristics: we cannot distinguish
					// between repeated exemplars and new instances with the same values.
					// This is done silently without logs as it is not an error but out of spec.
					// This does not affect classic histograms so that behaviour is unchanged.
					e = exemplar.Exemplar{} // Reset for next time round loop.
					continue
				}
				e.Ts = t
			}
			exemplars = append(exemplars, e)
			e = exemplar.Exemplar{} // Reset for next time round loop.
		}
		// Sort so that checking for duplicates / out of order is more efficient during validation.
		slices.SortFunc(exemplars, exemplar.Compare)
		outOfOrderExemplars := 0
		for _, e := range exemplars {
			_, exemplarErr := app.AppendExemplar(ref, lset, e)
			switch {
			case exemplarErr == nil:
				// Do nothing.
			case errors.Is(exemplarErr, storage.ErrOutOfOrderExemplar):
				outOfOrderExemplars++
			default:
				// Since exemplar storage is still experimental, we don't fail the scrape on ingestion errors.
				level.Debug(sl.l).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr)
			}
		}
		if outOfOrderExemplars > 0 && outOfOrderExemplars == len(exemplars) {
			// Only report out of order exemplars if all are out of order, otherwise this was a partial update
			// to some existing set of exemplars.
			appErrs.numExemplarOutOfOrder += outOfOrderExemplars
			level.Debug(sl.l).Log("msg", "Out of order exemplars", "count", outOfOrderExemplars, "latest", fmt.Sprintf("%+v", exemplars[len(exemplars)-1]))
			sl.metrics.targetScrapeExemplarOutOfOrder.Add(float64(outOfOrderExemplars))
		}

		if sl.appendMetadataToWAL && metadataChanged {
			if _, merr := app.UpdateMetadata(ref, lset, meta); merr != nil {
				// No need to fail the scrape on errors appending metadata.
				level.Debug(sl.l).Log("msg", "Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", meta), "err", merr)
			}
		}
	}
	if sampleLimitErr != nil {
		if err == nil {
			err = sampleLimitErr
		}
		// We only want to increment this once per scrape, so this is Inc'd outside the loop.
		sl.metrics.targetScrapeSampleLimit.Inc()
	}
	if bucketLimitErr != nil {
		if err == nil {
			err = bucketLimitErr // If sample limit is hit, that error takes precedence.
		}
		// We only want to increment this once per scrape, so this is Inc'd outside the loop.
		sl.metrics.targetScrapeNativeHistogramBucketLimit.Inc()
	}
	if appErrs.numOutOfOrder > 0 {
		level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order samples", "num_dropped", appErrs.numOutOfOrder)
	}
	if appErrs.numDuplicates > 0 {
		level.Warn(sl.l).Log("msg", "Error on ingesting samples with different value but same timestamp", "num_dropped", appErrs.numDuplicates)
	}
	if appErrs.numOutOfBounds > 0 {
		level.Warn(sl.l).Log("msg", "Error on ingesting samples that are too old or are too far into the future", "num_dropped", appErrs.numOutOfBounds)
	}
	if appErrs.numExemplarOutOfOrder > 0 {
		level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", appErrs.numExemplarOutOfOrder)
	}
	if err == nil {
		sl.cache.forEachStale(func(lset labels.Labels) bool {
			// Series no longer exposed, mark it stale.
			_, err = app.Append(0, lset, defTime, math.Float64frombits(value.StaleNaN))
			switch {
			case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
				// Do not count these in logging, as this is expected if a target
				// goes away and comes back again with a new scrape loop.
				err = nil
			}
			return err == nil
		})
	}
	return
}
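
// Editorial note, not part of the original source: an illustrative body and
// the counters append derives from it.
//
//	# TYPE http_requests_total counter
//	http_requests_total{code="200"} 1027
//	http_requests_total{code="500"} 3
//
// Parsing this yields total=2. On a first scrape with no relabel drops,
// added=2 and seriesAdded=2; a sample whose series is dropped by relabeling
// still counts toward total but not toward added or seriesAdded.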

// checkAddError checks the error returned from appending a sample. It reports
// whether the sample was added; a non-nil returned error tells the caller to
// stop processing more samples. Sample and bucket limit errors are recorded in
// the provided pointers so that parsing can continue to completion.
func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) {
	switch {
	case err == nil:
		return true, nil
	case errors.Is(err, storage.ErrNotFound):
		return false, storage.ErrNotFound
	case errors.Is(err, storage.ErrOutOfOrderSample):
		appErrs.numOutOfOrder++
		level.Debug(sl.l).Log("msg", "Out of order sample", "series", string(met))
		sl.metrics.targetScrapeSampleOutOfOrder.Inc()
		return false, nil
	case errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
		appErrs.numDuplicates++
		level.Debug(sl.l).Log("msg", "Duplicate sample for timestamp", "series", string(met))
		sl.metrics.targetScrapeSampleDuplicate.Inc()
		return false, nil
	case errors.Is(err, storage.ErrOutOfBounds):
		appErrs.numOutOfBounds++
		level.Debug(sl.l).Log("msg", "Out of bounds metric", "series", string(met))
		sl.metrics.targetScrapeSampleOutOfBounds.Inc()
		return false, nil
	case errors.Is(err, errSampleLimit):
		// Keep on parsing output if we hit the limit, so we report the correct
		// total number of samples scraped.
		*sampleLimitErr = err
		return false, nil
	case errors.Is(err, errBucketLimit):
		// Keep on parsing output if we hit the limit, so we report the correct
		// total number of samples scraped.
		*bucketLimitErr = err
		return false, nil
	default:
		return false, err
	}
}

// The constants are suffixed with the invalid \xff unicode rune to avoid collisions
// with scraped metrics in the cache.
var (
	scrapeHealthMetricName        = []byte("up" + "\xff")
	scrapeDurationMetricName      = []byte("scrape_duration_seconds" + "\xff")
	scrapeSamplesMetricName       = []byte("scrape_samples_scraped" + "\xff")
	samplesPostRelabelMetricName  = []byte("scrape_samples_post_metric_relabeling" + "\xff")
	scrapeSeriesAddedMetricName   = []byte("scrape_series_added" + "\xff")
	scrapeTimeoutMetricName       = []byte("scrape_timeout_seconds" + "\xff")
	scrapeSampleLimitMetricName   = []byte("scrape_sample_limit" + "\xff")
	scrapeBodySizeBytesMetricName = []byte("scrape_body_size_bytes" + "\xff")
)
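
// Editorial note, not part of the original source: the "\xff" suffix works
// because 0xff never appears in valid UTF-8, so a cache key such as "up\xff"
// cannot collide with any exposed metric string; addReportSample below strips
// the suffix again before building the actual series.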

func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration time.Duration, scraped, added, seriesAdded, bytes int, scrapeErr error) (err error) {
	sl.scraper.Report(start, duration, scrapeErr)

	ts := timestamp.FromTime(start)

	var health float64
	if scrapeErr == nil {
		health = 1
	}
	b := labels.NewBuilderWithSymbolTable(sl.symbolTable)

	if err = sl.addReportSample(app, scrapeHealthMetricName, ts, health, b); err != nil {
		return
	}
	if err = sl.addReportSample(app, scrapeDurationMetricName, ts, duration.Seconds(), b); err != nil {
		return
	}
	if err = sl.addReportSample(app, scrapeSamplesMetricName, ts, float64(scraped), b); err != nil {
		return
	}
	if err = sl.addReportSample(app, samplesPostRelabelMetricName, ts, float64(added), b); err != nil {
		return
	}
	if err = sl.addReportSample(app, scrapeSeriesAddedMetricName, ts, float64(seriesAdded), b); err != nil {
		return
	}
	if sl.reportExtraMetrics {
		if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, sl.timeout.Seconds(), b); err != nil {
			return
		}
		if err = sl.addReportSample(app, scrapeSampleLimitMetricName, ts, float64(sl.sampleLimit), b); err != nil {
			return
		}
		if err = sl.addReportSample(app, scrapeBodySizeBytesMetricName, ts, float64(bytes), b); err != nil {
			return
		}
	}
	return
}
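
// reportStale writes staleness markers (StaleNaN) for all report series of a
// target, so that queries stop returning the last values once the target is
// gone.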
func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err error) {
	ts := timestamp.FromTime(start)

	stale := math.Float64frombits(value.StaleNaN)
	b := labels.NewBuilder(labels.EmptyLabels())

	if err = sl.addReportSample(app, scrapeHealthMetricName, ts, stale, b); err != nil {
		return
	}
	if err = sl.addReportSample(app, scrapeDurationMetricName, ts, stale, b); err != nil {
		return
	}
	if err = sl.addReportSample(app, scrapeSamplesMetricName, ts, stale, b); err != nil {
		return
	}
	if err = sl.addReportSample(app, samplesPostRelabelMetricName, ts, stale, b); err != nil {
		return
	}
	if err = sl.addReportSample(app, scrapeSeriesAddedMetricName, ts, stale, b); err != nil {
		return
	}
	if sl.reportExtraMetrics {
		if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, stale, b); err != nil {
			return
		}
		if err = sl.addReportSample(app, scrapeSampleLimitMetricName, ts, stale, b); err != nil {
			return
		}
		if err = sl.addReportSample(app, scrapeBodySizeBytesMetricName, ts, stale, b); err != nil {
			return
		}
	}
	return
}
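
// addReportSample appends one report sample, going through the scrape cache
// so the label set and series reference are only built on the first scrape.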
func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v float64, b *labels.Builder) error {
	ce, ok, _ := sl.cache.get(s)
	var ref storage.SeriesRef
	var lset labels.Labels
	if ok {
		ref = ce.ref
		lset = ce.lset
	} else {
		// The constants are suffixed with the invalid \xff unicode rune to avoid collisions
		// with scraped metrics in the cache.
		// We have to drop it when building the actual metric.
		b.Reset(labels.EmptyLabels())
		b.Set(labels.MetricName, string(s[:len(s)-1]))
		lset = sl.reportSampleMutator(b.Labels())
	}

	ref, err := app.Append(ref, lset, t, v)
	switch {
	case err == nil:
		if !ok {
			sl.cache.addRef(s, ref, lset, lset.Hash())
		}
		return nil
	case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
		// Do not log here, as this is expected if a target goes away and comes back
		// again with a new scrape loop.
		return nil
	default:
		return err
	}
}
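
// First call for a given key (e.g. scrapeHealthMetricName): cache miss, the
// "\xff" suffix is dropped, reportSampleMutator attaches the target labels,
// and the SeriesRef returned by Append is cached. Every later scrape hits the
// cache and appends via the stored ref directly.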

// zeroConfig returns a new scrape config that only contains configuration items
// that alter metrics.
func zeroConfig(c *config.ScrapeConfig) *config.ScrapeConfig {
	z := *c
	// We zero out the fields that for sure don't affect scrape.
	z.ScrapeInterval = 0
	z.ScrapeTimeout = 0
	z.SampleLimit = 0
	z.HTTPClientConfig = config_util.HTTPClientConfig{}
	return &z
}
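
// For example, two configs that differ only in ScrapeInterval, ScrapeTimeout,
// SampleLimit, or HTTP client settings zero to identical values here, so
// reusableCache (below) reports that the scrape cache can be kept.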

// reusableCache compares two scrape configs and tells whether the cache is
// still valid.
func reusableCache(r, l *config.ScrapeConfig) bool {
	if r == nil || l == nil {
		return false
	}
	return reflect.DeepEqual(zeroConfig(r), zeroConfig(l))
}
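
// A rough usage sketch (hypothetical oldCfg/newCfg seen during a reload):
//
//	if !reusableCache(oldCfg, newCfg) {
//		// The parsing or relabeling behaviour changed; drop the per-target
//		// scrape caches instead of carrying stale entries over.
//	}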

// ctxKey is a dedicated type for keys of context-embedded values propagated
// with the scrape context.
type ctxKey int

// Valid ctxKey values.
const (
	ctxKeyMetadata ctxKey = iota + 1
	ctxKeyTarget
)

func ContextWithMetricMetadataStore(ctx context.Context, s MetricMetadataStore) context.Context {
	return context.WithValue(ctx, ctxKeyMetadata, s)
}

func MetricMetadataStoreFromContext(ctx context.Context) (MetricMetadataStore, bool) {
	s, ok := ctx.Value(ctxKeyMetadata).(MetricMetadataStore)
	return s, ok
}

func ContextWithTarget(ctx context.Context, t *Target) context.Context {
	return context.WithValue(ctx, ctxKeyTarget, t)
}

func TargetFromContext(ctx context.Context) (*Target, bool) {
	t, ok := ctx.Value(ctxKeyTarget).(*Target)
	return t, ok
}
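
// Rough usage sketch for the context helpers above (hypothetical caller):
//
//	ctx = ContextWithTarget(ctx, target)
//	// ... hand ctx to an appender; later:
//	if t, ok := TargetFromContext(ctx); ok {
//		// use t, e.g. to look up per-target metadata
//	}

// pickSchema maps the desired native-histogram bucket growth factor to the
// nearest supported exponential schema, clamped to the valid schema range.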
func pickSchema(bucketFactor float64) int32 {
	if bucketFactor <= 1 {
		bucketFactor = 1.00271
	}
	floor := math.Floor(-math.Log2(math.Log2(bucketFactor)))
	switch {
	case floor >= float64(histogram.ExponentialSchemaMax):
		return histogram.ExponentialSchemaMax
	case floor <= float64(histogram.ExponentialSchemaMin):
		return histogram.ExponentialSchemaMin
	default:
		return int32(floor)
	}
}
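
// Worked examples, assuming the native-histogram convention that schema n
// means adjacent bucket boundaries grow by a factor of 2^(2^-n):
//
//	pickSchema(2)   // -log2(log2(2)) = 0     -> schema 0 (buckets double)
//	pickSchema(1.3) // floor(1.40...) = 1     -> schema 1
//	pickSchema(1)   // falls back to 1.00271  -> clamped to ExponentialSchemaMax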