Merge branch 'main' into linasm/optimize-floatBucketIterator2
Signed-off-by: Linas Medžiūnas <linasm@users.noreply.github.com>
This commit is contained in commit e3e22abadb.
@@ -52,7 +52,8 @@ Release cadence of first pre-releases being cut is 6 weeks.
 | v2.45 LTS | 2023-05-31 | Jesus Vazquez (Github: @jesusvazquez) |
 | v2.46 | 2023-07-12 | Julien Pivotto (GitHub: @roidelapluie) |
 | v2.47 | 2023-08-23 | Bryan Boreham (GitHub: @bboreham) |
-| v2.48 | 2023-10-04 | **searching for volunteer** |
+| v2.48 | 2023-10-04 | Levi Harrison (GitHub: @LeviHarrison) |
+| v2.49 | 2023-11-15 | **searching for volunteer** |
 
 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
@@ -202,8 +202,10 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
            level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.")
        case "native-histograms":
            c.tsdb.EnableNativeHistograms = true
-           c.scrape.EnableProtobufNegotiation = true
-           level.Info(logger).Log("msg", "Experimental native histogram support enabled.")
+           // Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
+           config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultNativeHistogramScrapeProtocols
+           config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultNativeHistogramScrapeProtocols
+           level.Info(logger).Log("msg", "Experimental native histogram support enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
        case "":
            continue
        case "promql-at-modifier", "promql-negative-offset":
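Note: the `native-histograms` branch above no longer toggles protobuf negotiation on the scrape config; instead it mutates the package-level scrape-protocol defaults. A minimal illustrative sketch of that effect (a hypothetical standalone program, not part of the commit; it only uses the exported names from `github.com/prometheus/prometheus/config` introduced later in this diff):

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/config"
    )

    func main() {
        // Without the flag: text-based formats are preferred.
        fmt.Println(config.DefaultGlobalConfig.ScrapeProtocols)
        // What the feature flag does: swap in the protobuf-first ordering, so
        // any config unmarshalled afterwards defaults to it.
        config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultNativeHistogramScrapeProtocols
        fmt.Println(config.DefaultGlobalConfig.ScrapeProtocols)
    }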
@@ -619,8 +621,18 @@ func main() {
        discoveryManagerNotify = legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), legacymanager.Name("notify"))
    }
 
+   scrapeManager, err := scrape.NewManager(
+       &cfg.scrape,
+       log.With(logger, "component", "scrape manager"),
+       fanoutStorage,
+       prometheus.DefaultRegisterer,
+   )
+   if err != nil {
+       level.Error(logger).Log("msg", "failed to create a scrape manager", "err", err)
+       os.Exit(1)
+   }
+
    var (
-       scrapeManager  = scrape.NewManager(&cfg.scrape, log.With(logger, "component", "scrape manager"), fanoutStorage)
        tracingManager = tracing.NewManager(logger)
 
        queryEngine *promql.Engine
cmd/promtool/testdata/no-test-group-interval.yml (vendored, new file, +15)
@@ -0,0 +1,15 @@
+tests:
+  - input_series:
+      - series: test
+        values: 0 1
+    promql_expr_test:
+      - expr: test
+        eval_time: 59s
+        exp_samples:
+          - value: 0
+            labels: test
+      - expr: test
+        eval_time: 1m
+        exp_samples:
+          - value: 1
+            labels: test
@@ -18,7 +18,6 @@ import (
    "context"
    "fmt"
    "io"
-   "math"
    "os"
    "path/filepath"
    "runtime"
@@ -459,7 +458,16 @@ func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExten
    postingInfos := []postingInfo{}
 
    printInfo := func(postingInfos []postingInfo) {
-       slices.SortFunc(postingInfos, func(a, b postingInfo) int { return int(b.metric) - int(a.metric) })
+       slices.SortFunc(postingInfos, func(a, b postingInfo) int {
+           switch {
+           case b.metric < a.metric:
+               return -1
+           case b.metric > a.metric:
+               return 1
+           default:
+               return 0
+           }
+       })
 
        for i, pc := range postingInfos {
            if i >= limit {
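Note: the one-line comparator was replaced because converting an unsigned count to `int` before subtracting can wrap and misorder large values. A self-contained sketch of the failure mode and the fix (assuming a `uint64` key, analogous to the `metric` field above; uses the Go 1.21 standard `slices` package):

    package main

    import (
        "fmt"
        "math"
        "slices"
    )

    func main() {
        vals := []uint64{1, math.MaxUint64, 2}

        // Broken descending sort: int(math.MaxUint64) wraps to -1, so the
        // largest value loses every comparison.
        bad := slices.Clone(vals)
        slices.SortFunc(bad, func(a, b uint64) int { return int(b) - int(a) })
        fmt.Println(bad)

        // Safe descending sort: explicit three-way comparison, as in the diff.
        good := slices.Clone(vals)
        slices.SortFunc(good, func(a, b uint64) int {
            switch {
            case b < a:
                return -1
            case b > a:
                return 1
            default:
                return 0
            }
        })
        fmt.Println(good)
    }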
@@ -620,10 +628,12 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
        err = tsdb_errors.NewMulti(err, chunkr.Close()).Err()
    }()
 
-   const maxSamplesPerChunk = 120
-   nBuckets := 10
-   histogram := make([]int, nBuckets)
    totalChunks := 0
+   floatChunkSamplesCount := make([]int, 0)
+   floatChunkSize := make([]int, 0)
+   histogramChunkSamplesCount := make([]int, 0)
+   histogramChunkSize := make([]int, 0)
+   histogramChunkBucketsCount := make([]int, 0)
    var builder labels.ScratchBuilder
    for postingsr.Next() {
        var chks []chunks.Meta
@@ -637,26 +647,56 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
            if err != nil {
                return err
            }
-           chunkSize := math.Min(float64(chk.NumSamples()), maxSamplesPerChunk)
-           // Calculate the bucket for the chunk and increment it in the histogram.
-           bucket := int(math.Ceil(float64(nBuckets)*chunkSize/maxSamplesPerChunk)) - 1
-           histogram[bucket]++
+           switch chk.Encoding() {
+           case chunkenc.EncXOR:
+               floatChunkSamplesCount = append(floatChunkSamplesCount, chk.NumSamples())
+               floatChunkSize = append(floatChunkSize, len(chk.Bytes()))
+           case chunkenc.EncFloatHistogram:
+               histogramChunkSamplesCount = append(histogramChunkSamplesCount, chk.NumSamples())
+               histogramChunkSize = append(histogramChunkSize, len(chk.Bytes()))
+               fhchk, ok := chk.(*chunkenc.FloatHistogramChunk)
+               if !ok {
+                   return fmt.Errorf("chunk is not FloatHistogramChunk")
+               }
+               it := fhchk.Iterator(nil)
+               bucketCount := 0
+               for it.Next() == chunkenc.ValFloatHistogram {
+                   _, f := it.AtFloatHistogram()
+                   bucketCount += len(f.PositiveBuckets)
+                   bucketCount += len(f.NegativeBuckets)
+               }
+               histogramChunkBucketsCount = append(histogramChunkBucketsCount, bucketCount)
+           case chunkenc.EncHistogram:
+               histogramChunkSamplesCount = append(histogramChunkSamplesCount, chk.NumSamples())
+               histogramChunkSize = append(histogramChunkSize, len(chk.Bytes()))
+               hchk, ok := chk.(*chunkenc.HistogramChunk)
+               if !ok {
+                   return fmt.Errorf("chunk is not HistogramChunk")
+               }
+               it := hchk.Iterator(nil)
+               bucketCount := 0
+               for it.Next() == chunkenc.ValHistogram {
+                   _, f := it.AtHistogram()
+                   bucketCount += len(f.PositiveBuckets)
+                   bucketCount += len(f.NegativeBuckets)
+               }
+               histogramChunkBucketsCount = append(histogramChunkBucketsCount, bucketCount)
+           }
            totalChunks++
        }
    }
 
    fmt.Printf("\nCompaction analysis:\n")
-   fmt.Println("Fullness: Amount of samples in chunks (100% is 120 samples)")
-   // Normalize absolute counts to percentages and print them out.
-   for bucket, count := range histogram {
-       percentage := 100.0 * count / totalChunks
-       fmt.Printf("%7d%%: ", (bucket+1)*10)
-       for j := 0; j < percentage; j++ {
-           fmt.Printf("#")
-       }
-       fmt.Println()
-   }
-   fmt.Println()
+   displayHistogram("samples per float chunk", floatChunkSamplesCount, totalChunks)
+
+   displayHistogram("bytes per float chunk", floatChunkSize, totalChunks)
+
+   displayHistogram("samples per histogram chunk", histogramChunkSamplesCount, totalChunks)
+
+   displayHistogram("bytes per histogram chunk", histogramChunkSize, totalChunks)
+
+   displayHistogram("buckets per histogram chunk", histogramChunkBucketsCount, totalChunks)
    return nil
 }
@@ -732,3 +772,42 @@ func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxB
 
    return checkErr(backfill(5000, inputFile.Bytes(), outputDir, humanReadable, quiet, maxBlockDuration))
 }
+
+func displayHistogram(dataType string, datas []int, total int) {
+   slices.Sort(datas)
+   start, end, step := generateBucket(datas[0], datas[len(datas)-1])
+   sum := 0
+   buckets := make([]int, (end-start)/step+1)
+   maxCount := 0
+   for _, c := range datas {
+       sum += c
+       buckets[(c-start)/step]++
+       if buckets[(c-start)/step] > maxCount {
+           maxCount = buckets[(c-start)/step]
+       }
+   }
+   avg := sum / len(datas)
+   fmt.Printf("%s (min/avg/max): %d/%d/%d\n", dataType, datas[0], avg, datas[len(datas)-1])
+   maxLeftLen := strconv.Itoa(len(fmt.Sprintf("%d", end)))
+   maxRightLen := strconv.Itoa(len(fmt.Sprintf("%d", end+step)))
+   maxCountLen := strconv.Itoa(len(fmt.Sprintf("%d", maxCount)))
+   for bucket, count := range buckets {
+       percentage := 100.0 * count / total
+       fmt.Printf("[%"+maxLeftLen+"d, %"+maxRightLen+"d]: %"+maxCountLen+"d %s\n", bucket*step+start+1, (bucket+1)*step+start, count, strings.Repeat("#", percentage))
+   }
+   fmt.Println()
+}
+
+func generateBucket(min, max int) (start, end, step int) {
+   s := (max - min) / 10
+
+   step = 10
+   for step < s && step <= 10000 {
+       step *= 10
+   }
+
+   start = min - min%step
+   end = max - max%step + step
+
+   return
+}
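Note: a worked example of the bucketing arithmetic in `generateBucket`, matching the `TestGenerateBucket` case added below. The snippet inlines a copy of the function from the diff so it runs standalone:

    package main

    import "fmt"

    // Copied verbatim from the diff above.
    func generateBucket(min, max int) (start, end, step int) {
        s := (max - min) / 10

        step = 10
        for step < s && step <= 10000 {
            step *= 10
        }

        start = min - min%step
        end = max - max%step + step

        return
    }

    func main() {
        // (141-101)/10 = 4, so the step stays at 10; the bounds then snap
        // outward to multiples of the step: start 100, end 150.
        fmt.Println(generateBucket(101, 141)) // 100 150 10
    }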
cmd/promtool/tsdb_test.go (new file, +43)
@@ -0,0 +1,43 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+   "testing"
+
+   "github.com/stretchr/testify/require"
+)
+
+func TestGenerateBucket(t *testing.T) {
+   tcs := []struct {
+       min, max         int
+       start, end, step int
+   }{
+       {
+           min:   101,
+           max:   141,
+           start: 100,
+           end:   150,
+           step:  10,
+       },
+   }
+
+   for _, tc := range tcs {
+       start, end, step := generateBucket(tc.min, tc.max)
+
+       require.Equal(t, tc.start, start)
+       require.Equal(t, tc.end, end)
+       require.Equal(t, tc.step, step)
+   }
+}
@@ -96,6 +96,9 @@ func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts) []error {
    // Testing.
    var errs []error
    for _, t := range unitTestInp.Tests {
+       if t.Interval == 0 {
+           t.Interval = unitTestInp.EvaluationInterval
+       }
        ers := t.test(evalInterval, groupOrderMap, queryOpts, unitTestInp.RuleFiles...)
        if ers != nil {
            errs = append(errs, ers...)
@@ -112,6 +112,16 @@ func TestRulesUnitTest(t *testing.T) {
            },
            want: 0,
        },
+       {
+           name: "No test group interval",
+           args: args{
+               files: []string{"./testdata/no-test-group-interval.yml"},
+           },
+           queryOpts: promql.LazyLoaderOpts{
+               EnableNegativeOffset: true,
+           },
+           want: 0,
+       },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
config/config.go (100 lines changed)
@@ -19,6 +19,7 @@ import (
    "net/url"
    "os"
    "path/filepath"
+   "sort"
    "strings"
    "time"
 
@@ -143,12 +144,14 @@
        ScrapeInterval:     model.Duration(1 * time.Minute),
        ScrapeTimeout:      model.Duration(10 * time.Second),
        EvaluationInterval: model.Duration(1 * time.Minute),
+       // When native histogram feature flag is enabled, ScrapeProtocols default
+       // changes to DefaultNativeHistogramScrapeProtocols.
+       ScrapeProtocols: DefaultScrapeProtocols,
    }
 
    // DefaultScrapeConfig is the default scrape configuration.
    DefaultScrapeConfig = ScrapeConfig{
-       // ScrapeTimeout and ScrapeInterval default to the configured
-       // globals.
+       // ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals.
        ScrapeClassicHistograms: false,
        MetricsPath:             "/metrics",
        Scheme:                  "http",
@@ -260,7 +263,7 @@ func (c Config) String() string {
    return string(b)
 }
 
-// ScrapeConfigs returns the scrape configurations.
+// GetScrapeConfigs returns the scrape configurations.
 func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
    scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs))
 
@@ -385,6 +388,11 @@ type GlobalConfig struct {
    ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
    // The default timeout when scraping targets.
    ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"`
+   // The protocols to negotiate during a scrape. It tells clients what
+   // protocols are accepted by Prometheus and with what weight (most wanted is first).
+   // Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
+   // OpenMetricsText1.0.0, PrometheusText0.0.4.
+   ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
    // How frequently to evaluate rules by default.
    EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"`
    // File to which PromQL queries are logged.
@@ -414,6 +422,68 @@ type GlobalConfig struct {
    KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
 }
 
+// ScrapeProtocol represents supported protocol for scraping metrics.
+type ScrapeProtocol string
+
+// Validate returns error if given scrape protocol is not supported.
+func (s ScrapeProtocol) Validate() error {
+   if _, ok := ScrapeProtocolsHeaders[s]; !ok {
+       return fmt.Errorf("unknown scrape protocol %v, supported: %v",
+           s, func() (ret []string) {
+               for k := range ScrapeProtocolsHeaders {
+                   ret = append(ret, string(k))
+               }
+               sort.Strings(ret)
+               return ret
+           }())
+   }
+   return nil
+}
+
+var (
+   PrometheusProto      ScrapeProtocol = "PrometheusProto"
+   PrometheusText0_0_4  ScrapeProtocol = "PrometheusText0.0.4"
+   OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1"
+   OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0"
+
+   ScrapeProtocolsHeaders = map[ScrapeProtocol]string{
+       PrometheusProto:      "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
+       PrometheusText0_0_4:  "text/plain;version=0.0.4",
+       OpenMetricsText0_0_1: "application/openmetrics-text;version=0.0.1",
+       OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0",
+   }
+
+   DefaultScrapeProtocols = []ScrapeProtocol{
+       OpenMetricsText1_0_0,
+       OpenMetricsText0_0_1,
+       PrometheusText0_0_4,
+   }
+   DefaultNativeHistogramScrapeProtocols = []ScrapeProtocol{
+       PrometheusProto,
+       OpenMetricsText1_0_0,
+       OpenMetricsText0_0_1,
+       PrometheusText0_0_4,
+   }
+)
+
+// validateAcceptScrapeProtocols returns errors if we see problems with accept scrape protocols option.
+func validateAcceptScrapeProtocols(sps []ScrapeProtocol) error {
+   if len(sps) == 0 {
+       return errors.New("scrape_protocols cannot be empty")
+   }
+   dups := map[string]struct{}{}
+   for _, sp := range sps {
+       if _, ok := dups[strings.ToLower(string(sp))]; ok {
+           return fmt.Errorf("duplicated protocol in scrape_protocols, got %v", sps)
+       }
+       if err := sp.Validate(); err != nil {
+           return fmt.Errorf("scrape_protocols: %w", err)
+       }
+       dups[strings.ToLower(string(sp))] = struct{}{}
+   }
+   return nil
+}
+
 // SetDirectory joins any relative file paths with dir.
 func (c *GlobalConfig) SetDirectory(dir string) {
    c.QueryLogFile = config.JoinDir(dir, c.QueryLogFile)
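Note: validation is case sensitive and driven entirely by membership in `ScrapeProtocolsHeaders`, which is why the bad-config tests below reject the lower-cased name. A small usage sketch (a hypothetical standalone program importing the package this hunk modifies):

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/config"
    )

    func main() {
        // Canonical spelling passes.
        fmt.Println(config.ScrapeProtocol("PrometheusProto").Validate()) // <nil>
        // A lower-cased variant is rejected with the "unknown scrape protocol"
        // error asserted by the new testdata fixtures.
        fmt.Println(config.ScrapeProtocol("prometheusproto").Validate())
    }

Duplicate detection, by contrast, compares protocols case-insensitively inside the unexported `validateAcceptScrapeProtocols`.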
@@ -459,6 +529,14 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
    if gc.EvaluationInterval == 0 {
        gc.EvaluationInterval = DefaultGlobalConfig.EvaluationInterval
    }
+
+   if gc.ScrapeProtocols == nil {
+       gc.ScrapeProtocols = DefaultGlobalConfig.ScrapeProtocols
+   }
+   if err := validateAcceptScrapeProtocols(gc.ScrapeProtocols); err != nil {
+       return fmt.Errorf("%w for global config", err)
+   }
+
    *c = *gc
    return nil
 }
@@ -469,7 +547,8 @@ func (c *GlobalConfig) isZero() bool {
        c.ScrapeInterval == 0 &&
        c.ScrapeTimeout == 0 &&
        c.EvaluationInterval == 0 &&
-       c.QueryLogFile == ""
+       c.QueryLogFile == "" &&
+       c.ScrapeProtocols == nil
 }
 
 type ScrapeConfigs struct {
@@ -490,6 +569,11 @@ type ScrapeConfig struct {
    ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
    // The timeout for scraping targets of this config.
    ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"`
+   // The protocols to negotiate during a scrape. It tells clients what
+   // protocols are accepted by Prometheus and with what preference (most wanted is first).
+   // Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
+   // OpenMetricsText1.0.0, PrometheusText0.0.4.
+   ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
    // Whether to scrape a classic histogram that is also exposed as a native histogram.
    ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"`
    // The HTTP resource path on which to fetch metrics from targets.
@@ -577,6 +661,7 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
    return nil
 }
 
+// Validate validates scrape config, but also fills relevant default values from global config if needed.
 func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
    if c == nil {
        return errors.New("empty or null scrape config section")
@@ -618,6 +703,13 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
        c.KeepDroppedTargets = globalConfig.KeepDroppedTargets
    }
 
+   if c.ScrapeProtocols == nil {
+       c.ScrapeProtocols = globalConfig.ScrapeProtocols
+   }
+   if err := validateAcceptScrapeProtocols(c.ScrapeProtocols); err != nil {
+       return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName)
+   }
+
    return nil
 }
 
@@ -92,6 +92,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
    },
 
    RuleFiles: []string{
@@ -191,6 +192,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -292,6 +294,7 @@ var expectedConf = &Config{
        LabelLimit:            35,
        LabelNameLengthLimit:  210,
        LabelValueLengthLimit: 210,
+       ScrapeProtocols:       []ScrapeProtocol{PrometheusText0_0_4},
 
        HTTPClientConfig: config.HTTPClientConfig{
            BasicAuth: &config.BasicAuth{
@@ -387,6 +390,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -440,6 +444,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: "/metrics",
        Scheme:      "http",
@@ -471,6 +476,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -508,6 +514,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -545,6 +552,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -571,6 +579,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -606,6 +615,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -638,6 +648,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -677,6 +688,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -706,6 +718,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -738,6 +751,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -763,6 +777,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -791,6 +806,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: "/federate",
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -819,6 +835,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -847,6 +864,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -872,6 +890,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -905,6 +924,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -937,6 +957,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -965,6 +986,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -993,6 +1015,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -1025,6 +1048,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -1060,6 +1084,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -1114,6 +1139,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -1139,6 +1165,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        HTTPClientConfig: config.DefaultHTTPClientConfig,
        MetricsPath:      DefaultScrapeConfig.MetricsPath,
@@ -1175,6 +1202,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        HTTPClientConfig: config.DefaultHTTPClientConfig,
        MetricsPath:      DefaultScrapeConfig.MetricsPath,
@@ -1217,6 +1245,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -1250,6 +1279,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        HTTPClientConfig: config.DefaultHTTPClientConfig,
        MetricsPath:      DefaultScrapeConfig.MetricsPath,
@@ -1277,6 +1307,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -1307,6 +1338,7 @@ var expectedConf = &Config{
        LabelLimit:            globLabelLimit,
        LabelNameLengthLimit:  globLabelNameLengthLimit,
        LabelValueLengthLimit: globLabelValueLengthLimit,
+       ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 
        MetricsPath: DefaultScrapeConfig.MetricsPath,
        Scheme:      DefaultScrapeConfig.Scheme,
@@ -1925,6 +1957,14 @@ var expectedErrors = []struct {
        filename: "scrape_config_files_glob.bad.yml",
        errMsg:   `parsing YAML file testdata/scrape_config_files_glob.bad.yml: invalid scrape config file path "scrape_configs/*/*"`,
    },
+   {
+       filename: "scrape_config_files_scrape_protocols.bad.yml",
+       errMsg:   `parsing YAML file testdata/scrape_config_files_scrape_protocols.bad.yml: scrape_protocols: unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4] for scrape config with job name "node"`,
+   },
+   {
+       filename: "scrape_config_files_scrape_protocols2.bad.yml",
+       errMsg:   `parsing YAML file testdata/scrape_config_files_scrape_protocols2.bad.yml: duplicated protocol in scrape_protocols, got [OpenMetricsText1.0.0 PrometheusProto OpenMetricsText1.0.0] for scrape config with job name "node"`,
+   },
 }
 
 func TestBadConfigs(t *testing.T) {
@@ -2014,10 +2054,12 @@ func TestEmptyGlobalBlock(t *testing.T) {
 func TestGetScrapeConfigs(t *testing.T) {
    sc := func(jobName string, scrapeInterval, scrapeTimeout model.Duration) *ScrapeConfig {
        return &ScrapeConfig{
-           JobName:          jobName,
-           HonorTimestamps:  true,
-           ScrapeInterval:   scrapeInterval,
-           ScrapeTimeout:    scrapeTimeout,
+           JobName:         jobName,
+           HonorTimestamps: true,
+           ScrapeInterval:  scrapeInterval,
+           ScrapeTimeout:   scrapeTimeout,
+           ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
+
            MetricsPath:      "/metrics",
            Scheme:           "http",
            HTTPClientConfig: config.DefaultHTTPClientConfig,
@@ -2071,6 +2113,7 @@ func TestGetScrapeConfigs(t *testing.T) {
            HonorTimestamps: true,
            ScrapeInterval:  model.Duration(60 * time.Second),
            ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+           ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
 
            MetricsPath: DefaultScrapeConfig.MetricsPath,
            Scheme:      DefaultScrapeConfig.Scheme,
@@ -2101,6 +2144,8 @@ func TestGetScrapeConfigs(t *testing.T) {
            HonorTimestamps: true,
            ScrapeInterval:  model.Duration(15 * time.Second),
            ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+           ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
+
            HTTPClientConfig: config.HTTPClientConfig{
                TLSConfig: config.TLSConfig{
                    CertFile: filepath.FromSlash("testdata/valid_cert_file"),
config/testdata/conf.good.yml (vendored, 2 lines changed)
@@ -114,6 +114,7 @@ scrape_configs:
 
    scrape_interval: 50s
    scrape_timeout: 5s
+   scrape_protocols: ["PrometheusText0.0.4"]
 
    body_size_limit: 10MB
    sample_limit: 1000
@@ -122,7 +123,6 @@ scrape_configs:
    label_name_length_limit: 210
    label_value_length_limit: 210
 
-
    metrics_path: /my_path
    scheme: https
 
config/testdata/scrape_config_files_scrape_protocols.bad.yml (vendored, new file, +5)
@@ -0,0 +1,5 @@
+scrape_configs:
+  - job_name: node
+    scrape_protocols: ["prometheusproto"]
+    static_configs:
+      - targets: ['localhost:8080']
config/testdata/scrape_config_files_scrape_protocols2.bad.yml (vendored, new file, +5)
@@ -0,0 +1,5 @@
+scrape_configs:
+  - job_name: node
+    scrape_protocols: ["OpenMetricsText1.0.0", "PrometheusProto", "OpenMetricsText1.0.0"]
+    static_configs:
+      - targets: ['localhost:8080']
@@ -23,11 +23,13 @@ import (
    "sync"
    "time"
 
-   "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
-   "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network"
-   "github.com/Azure/go-autorest/autorest"
-   "github.com/Azure/go-autorest/autorest/adal"
-   "github.com/Azure/go-autorest/autorest/azure"
+   "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+   "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+   "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+   "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+   "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+   "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4"
+   "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2"
    "github.com/go-kit/log"
    "github.com/go-kit/log/level"
    "github.com/prometheus/client_golang/prometheus"
@@ -68,7 +70,7 @@ var (
    DefaultSDConfig = SDConfig{
        Port:                 80,
        RefreshInterval:      model.Duration(5 * time.Minute),
-       Environment:          azure.PublicCloud.Name,
+       Environment:          "AzurePublicCloud",
        AuthenticationMethod: authMethodOAuth,
        HTTPClientConfig:     config_util.DefaultHTTPClientConfig,
    }
@@ -80,6 +82,26 @@ var (
    })
 )
 
+var environments = map[string]cloud.Configuration{
+   "AZURECHINACLOUD":        cloud.AzureChina,
+   "AZURECLOUD":             cloud.AzurePublic,
+   "AZUREGERMANCLOUD":       cloud.AzurePublic,
+   "AZUREPUBLICCLOUD":       cloud.AzurePublic,
+   "AZUREUSGOVERNMENT":      cloud.AzureGovernment,
+   "AZUREUSGOVERNMENTCLOUD": cloud.AzureGovernment,
+}
+
+// CloudConfigurationFromName returns cloud configuration based on the common name specified.
+func CloudConfigurationFromName(name string) (cloud.Configuration, error) {
+   name = strings.ToUpper(name)
+   env, ok := environments[name]
+   if !ok {
+       return env, fmt.Errorf("There is no cloud configuration matching the name %q", name)
+   }
+
+   return env, nil
+}
+
 func init() {
    discovery.RegisterConfig(&SDConfig{})
    prometheus.MustRegister(failuresCount)
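Note: `CloudConfigurationFromName` upper-cases its input before the map lookup, so any capitalization of the documented environment names is accepted. A usage sketch (a hypothetical standalone program; the function is exported from the discovery package this hunk modifies, and the `ActiveDirectoryAuthorityHost` field is assumed from the azcore `cloud.Configuration` type):

    package main

    import (
        "fmt"

        "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
        "github.com/prometheus/prometheus/discovery/azure"
    )

    func main() {
        cfg, err := azure.CloudConfigurationFromName("AzurePublicCloud")
        if err != nil {
            panic(err)
        }
        // Resolves to the same configuration as cloud.AzurePublic.
        fmt.Println(cfg.ActiveDirectoryAuthorityHost == cloud.AzurePublic.ActiveDirectoryAuthorityHost)
    }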
@@ -175,80 +197,88 @@ func NewDiscovery(cfg *SDConfig, logger log.Logger) *Discovery {
 
 // azureClient represents multiple Azure Resource Manager providers.
 type azureClient struct {
-   nic    network.InterfacesClient
-   vm     compute.VirtualMachinesClient
-   vmss   compute.VirtualMachineScaleSetsClient
-   vmssvm compute.VirtualMachineScaleSetVMsClient
+   nic    *armnetwork.InterfacesClient
+   vm     *armcompute.VirtualMachinesClient
+   vmss   *armcompute.VirtualMachineScaleSetsClient
+   vmssvm *armcompute.VirtualMachineScaleSetVMsClient
+   logger log.Logger
 }
 
 // createAzureClient is a helper function for creating an Azure compute client to ARM.
 func createAzureClient(cfg SDConfig) (azureClient, error) {
-   env, err := azure.EnvironmentFromName(cfg.Environment)
+   cloudConfiguration, err := CloudConfigurationFromName(cfg.Environment)
    if err != nil {
        return azureClient{}, err
    }
 
-   activeDirectoryEndpoint := env.ActiveDirectoryEndpoint
-   resourceManagerEndpoint := env.ResourceManagerEndpoint
-
    var c azureClient
 
-   var spt *adal.ServicePrincipalToken
-
-   switch cfg.AuthenticationMethod {
-   case authMethodManagedIdentity:
-       spt, err = adal.NewServicePrincipalTokenFromManagedIdentity(resourceManagerEndpoint, &adal.ManagedIdentityOptions{ClientID: cfg.ClientID})
-       if err != nil {
-           return azureClient{}, err
-       }
-   case authMethodOAuth:
-       oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, cfg.TenantID)
-       if err != nil {
-           return azureClient{}, err
-       }
-
-       spt, err = adal.NewServicePrincipalToken(*oauthConfig, cfg.ClientID, string(cfg.ClientSecret), resourceManagerEndpoint)
-       if err != nil {
-           return azureClient{}, err
-       }
+   telemetry := policy.TelemetryOptions{
+       ApplicationID: userAgent,
+   }
+
+   credential, err := newCredential(cfg, policy.ClientOptions{
+       Cloud:     cloudConfiguration,
+       Telemetry: telemetry,
+   })
+   if err != nil {
+       return azureClient{}, err
    }
 
    client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "azure_sd")
    if err != nil {
        return azureClient{}, err
    }
-   sender := autorest.DecorateSender(client)
-   preparer := autorest.WithUserAgent(userAgent)
-
-   bearerAuthorizer := autorest.NewBearerAuthorizer(spt)
+   options := &arm.ClientOptions{
+       ClientOptions: policy.ClientOptions{
+           Transport: client,
+           Cloud:     cloudConfiguration,
+           Telemetry: telemetry,
+       },
+   }
 
-   c.vm = compute.NewVirtualMachinesClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID)
-   c.vm.Authorizer = bearerAuthorizer
-   c.vm.Sender = sender
-   c.vm.RequestInspector = preparer
+   c.vm, err = armcompute.NewVirtualMachinesClient(cfg.SubscriptionID, credential, options)
+   if err != nil {
+       return azureClient{}, err
+   }
 
-   c.nic = network.NewInterfacesClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID)
-   c.nic.Authorizer = bearerAuthorizer
-   c.nic.Sender = sender
-   c.nic.RequestInspector = preparer
+   c.nic, err = armnetwork.NewInterfacesClient(cfg.SubscriptionID, credential, options)
+   if err != nil {
+       return azureClient{}, err
+   }
 
-   c.vmss = compute.NewVirtualMachineScaleSetsClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID)
-   c.vmss.Authorizer = bearerAuthorizer
-   c.vmss.Sender = sender
-   c.vmss.RequestInspector = preparer
+   c.vmss, err = armcompute.NewVirtualMachineScaleSetsClient(cfg.SubscriptionID, credential, options)
+   if err != nil {
+       return azureClient{}, err
+   }
 
-   c.vmssvm = compute.NewVirtualMachineScaleSetVMsClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID)
-   c.vmssvm.Authorizer = bearerAuthorizer
-   c.vmssvm.Sender = sender
-   c.vmssvm.RequestInspector = preparer
+   c.vmssvm, err = armcompute.NewVirtualMachineScaleSetVMsClient(cfg.SubscriptionID, credential, options)
+   if err != nil {
+       return azureClient{}, err
+   }
 
    return c, nil
 }
 
-// azureResource represents a resource identifier in Azure.
-type azureResource struct {
-   Name          string
-   ResourceGroup string
+func newCredential(cfg SDConfig, policyClientOptions policy.ClientOptions) (azcore.TokenCredential, error) {
+   var credential azcore.TokenCredential
+   switch cfg.AuthenticationMethod {
+   case authMethodManagedIdentity:
+       options := &azidentity.ManagedIdentityCredentialOptions{ClientOptions: policyClientOptions, ID: azidentity.ClientID(cfg.ClientID)}
+       managedIdentityCredential, err := azidentity.NewManagedIdentityCredential(options)
+       if err != nil {
+           return nil, err
+       }
+       credential = azcore.TokenCredential(managedIdentityCredential)
+   case authMethodOAuth:
+       options := &azidentity.ClientSecretCredentialOptions{ClientOptions: policyClientOptions}
+       secretCredential, err := azidentity.NewClientSecretCredential(cfg.TenantID, cfg.ClientID, string(cfg.ClientSecret), options)
+       if err != nil {
+           return nil, err
+       }
+       credential = azcore.TokenCredential(secretCredential)
+   }
+   return credential, nil
 }
 
 // virtualMachine represents an Azure virtual machine (which can also be created by a VMSS)
@@ -266,22 +296,17 @@ type virtualMachine struct {
 }
 
 // Create a new azureResource object from an ID string.
-func newAzureResourceFromID(id string, logger log.Logger) (azureResource, error) {
-   // Resource IDs have the following format.
-   // /subscriptions/SUBSCRIPTION_ID/resourceGroups/RESOURCE_GROUP/providers/PROVIDER/TYPE/NAME
-   // or if embedded resource then
-   // /subscriptions/SUBSCRIPTION_ID/resourceGroups/RESOURCE_GROUP/providers/PROVIDER/TYPE/NAME/TYPE/NAME
-   s := strings.Split(id, "/")
-   if len(s) != 9 && len(s) != 11 {
-       err := fmt.Errorf("invalid ID '%s'. Refusing to create azureResource", id)
-       level.Error(logger).Log("err", err)
-       return azureResource{}, err
+func newAzureResourceFromID(id string, logger log.Logger) (*arm.ResourceID, error) {
+   if logger == nil {
+       logger = log.NewNopLogger()
    }
-
-   return azureResource{
-       Name:          strings.ToLower(s[8]),
-       ResourceGroup: strings.ToLower(s[4]),
-   }, nil
+   resourceID, err := arm.ParseResourceID(id)
+   if err != nil {
+       err := fmt.Errorf("invalid ID '%s': %w", id, err)
+       level.Error(logger).Log("err", err)
+       return &arm.ResourceID{}, err
+   }
+   return resourceID, nil
 }
 
 func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
@@ -292,6 +317,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
        failuresCount.Inc()
        return nil, fmt.Errorf("could not create Azure client: %w", err)
    }
+   client.logger = d.logger
 
    machines, err := client.getVMs(ctx, d.cfg.ResourceGroup)
    if err != nil {
@@ -344,7 +370,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
                azureLabelMachineComputerName:  model.LabelValue(vm.ComputerName),
                azureLabelMachineOSType:        model.LabelValue(vm.OsType),
                azureLabelMachineLocation:      model.LabelValue(vm.Location),
-               azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroup),
+               azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroupName),
                azureLabelMachineSize:          model.LabelValue(vm.Size),
            }
 
@@ -370,7 +396,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
                return
            }
 
-           if networkInterface.InterfacePropertiesFormat == nil {
+           if networkInterface.Properties == nil {
                continue
            }
 
@@ -378,21 +404,21 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
            // This information is available via another API call however the Go SDK does not
            // yet support this. On deallocated machines, this value happens to be nil so it
            // is a cheap and easy way to determine if a machine is allocated or not.
-           if networkInterface.Primary == nil {
+           if networkInterface.Properties.Primary == nil {
                level.Debug(d.logger).Log("msg", "Skipping deallocated virtual machine", "machine", vm.Name)
                return
            }
 
-           if *networkInterface.Primary {
-               for _, ip := range *networkInterface.IPConfigurations {
+           if *networkInterface.Properties.Primary {
+               for _, ip := range networkInterface.Properties.IPConfigurations {
                    // IPAddress is a field defined in PublicIPAddressPropertiesFormat,
                    // therefore we need to validate that both are not nil.
-                   if ip.PublicIPAddress != nil && ip.PublicIPAddress.PublicIPAddressPropertiesFormat != nil && ip.PublicIPAddress.IPAddress != nil {
-                       labels[azureLabelMachinePublicIP] = model.LabelValue(*ip.PublicIPAddress.IPAddress)
+                   if ip.Properties != nil && ip.Properties.PublicIPAddress != nil && ip.Properties.PublicIPAddress.Properties != nil && ip.Properties.PublicIPAddress.Properties.IPAddress != nil {
+                       labels[azureLabelMachinePublicIP] = model.LabelValue(*ip.Properties.PublicIPAddress.Properties.IPAddress)
                    }
-                   if ip.PrivateIPAddress != nil {
-                       labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.PrivateIPAddress)
-                       address := net.JoinHostPort(*ip.PrivateIPAddress, fmt.Sprintf("%d", d.port))
+                   if ip.Properties != nil && ip.Properties.PrivateIPAddress != nil {
+                       labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress)
+                       address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, fmt.Sprintf("%d", d.port))
                        labels[model.AddressLabel] = model.LabelValue(address)
                        ch <- target{labelSet: labels, err: nil}
                        return
@@ -427,93 +453,84 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 
 func (client *azureClient) getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error) {
    var vms []virtualMachine
-   var result compute.VirtualMachineListResultPage
-   var err error
    if len(resourceGroup) == 0 {
-       result, err = client.vm.ListAll(ctx)
+       pager := client.vm.NewListAllPager(nil)
+       for pager.More() {
+           nextResult, err := pager.NextPage(ctx)
+           if err != nil {
+               return nil, fmt.Errorf("could not list virtual machines: %w", err)
+           }
+           for _, vm := range nextResult.Value {
+               vms = append(vms, mapFromVM(*vm))
+           }
+       }
    } else {
-       result, err = client.vm.List(ctx, resourceGroup)
-   }
-   if err != nil {
-       return nil, fmt.Errorf("could not list virtual machines: %w", err)
-   }
-   for result.NotDone() {
-       for _, vm := range result.Values() {
-           vms = append(vms, mapFromVM(vm))
-       }
-       err = result.NextWithContext(ctx)
-       if err != nil {
-           return nil, fmt.Errorf("could not list virtual machines: %w", err)
+       pager := client.vm.NewListPager(resourceGroup, nil)
+       for pager.More() {
+           nextResult, err := pager.NextPage(ctx)
+           if err != nil {
+               return nil, fmt.Errorf("could not list virtual machines: %w", err)
+           }
+           for _, vm := range nextResult.Value {
+               vms = append(vms, mapFromVM(*vm))
+           }
        }
    }
 
    return vms, nil
 }
 
-type VmssListResultPage interface {
-   NextWithContext(ctx context.Context) (err error)
-   NotDone() bool
-   Values() []compute.VirtualMachineScaleSet
-}
-
-func (client *azureClient) getScaleSets(ctx context.Context, resourceGroup string) ([]compute.VirtualMachineScaleSet, error) {
-   var scaleSets []compute.VirtualMachineScaleSet
-   var result VmssListResultPage
-   var err error
+func (client *azureClient) getScaleSets(ctx context.Context, resourceGroup string) ([]armcompute.VirtualMachineScaleSet, error) {
+   var scaleSets []armcompute.VirtualMachineScaleSet
    if len(resourceGroup) == 0 {
-       var rtn compute.VirtualMachineScaleSetListWithLinkResultPage
-       rtn, err = client.vmss.ListAll(ctx)
-       if err != nil {
-           return nil, fmt.Errorf("could not list virtual machine scale sets: %w", err)
+       pager := client.vmss.NewListAllPager(nil)
+       for pager.More() {
+           nextResult, err := pager.NextPage(ctx)
+           if err != nil {
+               return nil, fmt.Errorf("could not list virtual machine scale sets: %w", err)
+           }
+           for _, vmss := range nextResult.Value {
+               scaleSets = append(scaleSets, *vmss)
+           }
        }
-       result = &rtn
    } else {
-       var rtn compute.VirtualMachineScaleSetListResultPage
-       rtn, err = client.vmss.List(ctx, resourceGroup)
-       if err != nil {
-           return nil, fmt.Errorf("could not list virtual machine scale sets: %w", err)
-       }
-       result = &rtn
-   }
-
-   for result.NotDone() {
-       scaleSets = append(scaleSets, result.Values()...)
-       err = result.NextWithContext(ctx)
-       if err != nil {
-           return nil, fmt.Errorf("could not list virtual machine scale sets: %w", err)
+       pager := client.vmss.NewListPager(resourceGroup, nil)
+       for pager.More() {
+           nextResult, err := pager.NextPage(ctx)
+           if err != nil {
+               return nil, fmt.Errorf("could not list virtual machine scale sets: %w", err)
+           }
+           for _, vmss := range nextResult.Value {
+               scaleSets = append(scaleSets, *vmss)
+           }
        }
    }
 
    return scaleSets, nil
 }
 
-func (client *azureClient) getScaleSetVMs(ctx context.Context, scaleSet compute.VirtualMachineScaleSet) ([]virtualMachine, error) {
+func (client *azureClient) getScaleSetVMs(ctx context.Context, scaleSet armcompute.VirtualMachineScaleSet) ([]virtualMachine, error) {
    var vms []virtualMachine
    // TODO do we really need to fetch the resourcegroup this way?
-   r, err := newAzureResourceFromID(*scaleSet.ID, nil)
+   r, err := newAzureResourceFromID(*scaleSet.ID, client.logger)
    if err != nil {
        return nil, fmt.Errorf("could not parse scale set ID: %w", err)
    }
 
-   result, err := client.vmssvm.List(ctx, r.ResourceGroup, *(scaleSet.Name), "", "", "")
-   if err != nil {
-       return nil, fmt.Errorf("could not list virtual machine scale set vms: %w", err)
-   }
-   for result.NotDone() {
-       for _, vm := range result.Values() {
-           vms = append(vms, mapFromVMScaleSetVM(vm, *scaleSet.Name))
-       }
-       err = result.NextWithContext(ctx)
+   pager := client.vmssvm.NewListPager(r.ResourceGroupName, *(scaleSet.Name), nil)
+   for pager.More() {
+       nextResult, err := pager.NextPage(ctx)
+       if err != nil {
+           return nil, fmt.Errorf("could not list virtual machine scale set vms: %w", err)
+       }
+       for _, vmssvm := range nextResult.Value {
+           vms = append(vms, mapFromVMScaleSetVM(*vmssvm, *scaleSet.Name))
+       }
    }
 
    return vms, nil
 }
 
-func mapFromVM(vm compute.VirtualMachine) virtualMachine {
-   osType := string(vm.StorageProfile.OsDisk.OsType)
+func mapFromVM(vm armcompute.VirtualMachine) virtualMachine {
+   osType := string(*vm.Properties.StorageProfile.OSDisk.OSType)
    tags := map[string]*string{}
    networkInterfaces := []string{}
    var computerName string
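Note: every list call in the rewritten client follows the same track-2 SDK idiom that replaces the old `NotDone`/`NextWithContext` pages: the client returns a typed pager, `More` reports whether another page exists, and `NextPage` fetches it. The idiom in isolation (a compile-oriented sketch; the helper name is hypothetical):

    package main

    import (
        "context"

        "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4"
    )

    // listAllVMNames shows the shape of the pager loop used above.
    func listAllVMNames(ctx context.Context, c *armcompute.VirtualMachinesClient) ([]string, error) {
        var names []string
        pager := c.NewListAllPager(nil)
        for pager.More() {
            page, err := pager.NextPage(ctx)
            if err != nil {
                return nil, err
            }
            for _, vm := range page.Value {
                if vm.Name != nil {
                    names = append(names, *vm.Name)
                }
            }
        }
        return names, nil
    }

    func main() {}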
@@ -523,18 +540,17 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine {
        tags = vm.Tags
    }
 
-   if vm.NetworkProfile != nil {
-       for _, vmNIC := range *(vm.NetworkProfile.NetworkInterfaces) {
-           networkInterfaces = append(networkInterfaces, *vmNIC.ID)
+   if vm.Properties != nil {
+       if vm.Properties.NetworkProfile != nil {
+           for _, vmNIC := range vm.Properties.NetworkProfile.NetworkInterfaces {
+               networkInterfaces = append(networkInterfaces, *vmNIC.ID)
+           }
        }
-   }
-
-   if vm.VirtualMachineProperties != nil {
-       if vm.VirtualMachineProperties.OsProfile != nil && vm.VirtualMachineProperties.OsProfile.ComputerName != nil {
-           computerName = *(vm.VirtualMachineProperties.OsProfile.ComputerName)
+       if vm.Properties.OSProfile != nil && vm.Properties.OSProfile.ComputerName != nil {
+           computerName = *(vm.Properties.OSProfile.ComputerName)
        }
-       if vm.VirtualMachineProperties.HardwareProfile != nil {
-           size = string(vm.VirtualMachineProperties.HardwareProfile.VMSize)
+       if vm.Properties.HardwareProfile != nil {
+           size = string(*vm.Properties.HardwareProfile.VMSize)
        }
    }
 
@@ -552,8 +568,8 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine {
    }
 }
 
-func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName string) virtualMachine {
-   osType := string(vm.StorageProfile.OsDisk.OsType)
+func mapFromVMScaleSetVM(vm armcompute.VirtualMachineScaleSetVM, scaleSetName string) virtualMachine {
+   osType := string(*vm.Properties.StorageProfile.OSDisk.OSType)
    tags := map[string]*string{}
    networkInterfaces := []string{}
    var computerName string
@@ -563,18 +579,17 @@ func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName strin
        tags = vm.Tags
    }
 
-   if vm.NetworkProfile != nil {
-       for _, vmNIC := range *(vm.NetworkProfile.NetworkInterfaces) {
-           networkInterfaces = append(networkInterfaces, *vmNIC.ID)
+   if vm.Properties != nil {
+       if vm.Properties.NetworkProfile != nil {
+           for _, vmNIC := range vm.Properties.NetworkProfile.NetworkInterfaces {
+               networkInterfaces = append(networkInterfaces, *vmNIC.ID)
+           }
        }
-   }
-
-   if vm.VirtualMachineScaleSetVMProperties != nil {
-       if vm.VirtualMachineScaleSetVMProperties.OsProfile != nil && vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName != nil {
-           computerName = *(vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName)
+       if vm.Properties.OSProfile != nil && vm.Properties.OSProfile.ComputerName != nil {
+           computerName = *(vm.Properties.OSProfile.ComputerName)
        }
-       if vm.VirtualMachineScaleSetVMProperties.HardwareProfile != nil {
-           size = string(vm.VirtualMachineScaleSetVMProperties.HardwareProfile.VMSize)
+       if vm.Properties.HardwareProfile != nil {
+           size = string(*vm.Properties.HardwareProfile.VMSize)
        }
    }
 
@@ -596,36 +611,20 @@ var errorNotFound = errors.New("network interface does not exist")

// getNetworkInterfaceByID gets the network interface.
// If a 404 is returned from the Azure API, `errorNotFound` is returned.
// On all other errors, an autorest.DetailedError is returned.
func (client *azureClient) getNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*network.Interface, error) {
result := network.Interface{}
queryParameters := map[string]interface{}{
"api-version": "2018-10-01",
func (client *azureClient) getNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error) {
r, err := newAzureResourceFromID(networkInterfaceID, client.logger)
if err != nil {
return nil, fmt.Errorf("could not parse network interface ID: %w", err)
}

preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.nic.BaseURI),
autorest.WithPath(networkInterfaceID),
autorest.WithQueryParameters(queryParameters),
autorest.WithUserAgent(userAgent))
req, err := preparer.Prepare((&http.Request{}).WithContext(ctx))
resp, err := client.nic.Get(ctx, r.ResourceGroupName, r.Name, nil)
if err != nil {
return nil, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", nil, "Failure preparing request")
}

resp, err := client.nic.GetSender(req)
if err != nil {
return nil, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure sending request")
}

result, err = client.nic.GetResponder(resp)
if err != nil {
if resp.StatusCode == http.StatusNotFound {
var responseError *azcore.ResponseError
if errors.As(err, &responseError) && responseError.StatusCode == http.StatusNotFound {
return nil, errorNotFound
}
return nil, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure responding to request")
return nil, fmt.Errorf("Failed to retrieve Interface %v with error: %w", networkInterfaceID, err)
}

return &result, nil
return &resp.Interface, nil
}
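With go-autorest gone, the prepare/send/respond triple collapses into a single `client.nic.Get` call, and HTTP failures surface as `*azcore.ResponseError`, so the 404 check moves from inspecting the raw response to `errors.As`, as the new body shows. A sketch of that classification pattern under the same assumption (the `classify` helper is ours, not part of the SDK):

``` go
package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)

var errNotFound = errors.New("resource does not exist")

// classify maps a track-2 SDK 404 to a sentinel error and wraps everything else.
func classify(err error) error {
	var respErr *azcore.ResponseError
	if errors.As(err, &respErr) && respErr.StatusCode == http.StatusNotFound {
		return errNotFound
	}
	return fmt.Errorf("request failed: %w", err)
}

func main() {
	// Simulate the SDK returning a 404; a real error would come from a client call.
	err := classify(&azcore.ResponseError{StatusCode: http.StatusNotFound})
	fmt.Println(errors.Is(err, errNotFound)) // true
}
```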
@@ -16,7 +16,8 @@ package azure

import (
"testing"

"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
)

@@ -29,34 +30,36 @@ func TestMapFromVMWithEmptyTags(t *testing.T) {
id := "test"
name := "name"
size := "size"
vmSize := armcompute.VirtualMachineSizeTypes(size)
osType := armcompute.OperatingSystemTypesLinux
vmType := "type"
location := "westeurope"
computerName := "computer_name"
networkProfile := compute.NetworkProfile{
NetworkInterfaces: &[]compute.NetworkInterfaceReference{},
networkProfile := armcompute.NetworkProfile{
NetworkInterfaces: []*armcompute.NetworkInterfaceReference{},
}
properties := &compute.VirtualMachineProperties{
OsProfile: &compute.OSProfile{
properties := &armcompute.VirtualMachineProperties{
OSProfile: &armcompute.OSProfile{
ComputerName: &computerName,
},
StorageProfile: &compute.StorageProfile{
OsDisk: &compute.OSDisk{
OsType: "Linux",
StorageProfile: &armcompute.StorageProfile{
OSDisk: &armcompute.OSDisk{
OSType: &osType,
},
},
NetworkProfile: &networkProfile,
HardwareProfile: &compute.HardwareProfile{
VMSize: compute.VirtualMachineSizeTypes(size),
HardwareProfile: &armcompute.HardwareProfile{
VMSize: &vmSize,
},
}

testVM := compute.VirtualMachine{
ID: &id,
Name: &name,
Type: &vmType,
Location: &location,
Tags: nil,
VirtualMachineProperties: properties,
testVM := armcompute.VirtualMachine{
ID: &id,
Name: &name,
Type: &vmType,
Location: &location,
Tags: nil,
Properties: properties,
}

expectedVM := virtualMachine{

@@ -80,37 +83,39 @@ func TestMapFromVMWithTags(t *testing.T) {
id := "test"
name := "name"
size := "size"
vmSize := armcompute.VirtualMachineSizeTypes(size)
osType := armcompute.OperatingSystemTypesLinux
vmType := "type"
location := "westeurope"
computerName := "computer_name"
tags := map[string]*string{
"prometheus": new(string),
}
networkProfile := compute.NetworkProfile{
NetworkInterfaces: &[]compute.NetworkInterfaceReference{},
networkProfile := armcompute.NetworkProfile{
NetworkInterfaces: []*armcompute.NetworkInterfaceReference{},
}
properties := &compute.VirtualMachineProperties{
OsProfile: &compute.OSProfile{
properties := &armcompute.VirtualMachineProperties{
OSProfile: &armcompute.OSProfile{
ComputerName: &computerName,
},
StorageProfile: &compute.StorageProfile{
OsDisk: &compute.OSDisk{
OsType: "Linux",
StorageProfile: &armcompute.StorageProfile{
OSDisk: &armcompute.OSDisk{
OSType: &osType,
},
},
NetworkProfile: &networkProfile,
HardwareProfile: &compute.HardwareProfile{
VMSize: compute.VirtualMachineSizeTypes(size),
HardwareProfile: &armcompute.HardwareProfile{
VMSize: &vmSize,
},
}

testVM := compute.VirtualMachine{
ID: &id,
Name: &name,
Type: &vmType,
Location: &location,
Tags: tags,
VirtualMachineProperties: properties,
testVM := armcompute.VirtualMachine{
ID: &id,
Name: &name,
Type: &vmType,
Location: &location,
Tags: tags,
Properties: properties,
}

expectedVM := virtualMachine{

@@ -134,34 +139,36 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) {
id := "test"
name := "name"
size := "size"
vmSize := armcompute.VirtualMachineSizeTypes(size)
osType := armcompute.OperatingSystemTypesLinux
vmType := "type"
location := "westeurope"
computerName := "computer_name"
networkProfile := compute.NetworkProfile{
NetworkInterfaces: &[]compute.NetworkInterfaceReference{},
networkProfile := armcompute.NetworkProfile{
NetworkInterfaces: []*armcompute.NetworkInterfaceReference{},
}
properties := &compute.VirtualMachineScaleSetVMProperties{
OsProfile: &compute.OSProfile{
properties := &armcompute.VirtualMachineScaleSetVMProperties{
OSProfile: &armcompute.OSProfile{
ComputerName: &computerName,
},
StorageProfile: &compute.StorageProfile{
OsDisk: &compute.OSDisk{
OsType: "Linux",
StorageProfile: &armcompute.StorageProfile{
OSDisk: &armcompute.OSDisk{
OSType: &osType,
},
},
NetworkProfile: &networkProfile,
HardwareProfile: &compute.HardwareProfile{
VMSize: compute.VirtualMachineSizeTypes(size),
HardwareProfile: &armcompute.HardwareProfile{
VMSize: &vmSize,
},
}

testVM := compute.VirtualMachineScaleSetVM{
ID: &id,
Name: &name,
Type: &vmType,
Location: &location,
Tags: nil,
VirtualMachineScaleSetVMProperties: properties,
testVM := armcompute.VirtualMachineScaleSetVM{
ID: &id,
Name: &name,
Type: &vmType,
Location: &location,
Tags: nil,
Properties: properties,
}

scaleSet := "testSet"

@@ -187,37 +194,39 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) {
id := "test"
name := "name"
size := "size"
vmSize := armcompute.VirtualMachineSizeTypes(size)
osType := armcompute.OperatingSystemTypesLinux
vmType := "type"
location := "westeurope"
computerName := "computer_name"
tags := map[string]*string{
"prometheus": new(string),
}
networkProfile := compute.NetworkProfile{
NetworkInterfaces: &[]compute.NetworkInterfaceReference{},
networkProfile := armcompute.NetworkProfile{
NetworkInterfaces: []*armcompute.NetworkInterfaceReference{},
}
properties := &compute.VirtualMachineScaleSetVMProperties{
OsProfile: &compute.OSProfile{
properties := &armcompute.VirtualMachineScaleSetVMProperties{
OSProfile: &armcompute.OSProfile{
ComputerName: &computerName,
},
StorageProfile: &compute.StorageProfile{
OsDisk: &compute.OSDisk{
OsType: "Linux",
StorageProfile: &armcompute.StorageProfile{
OSDisk: &armcompute.OSDisk{
OSType: &osType,
},
},
NetworkProfile: &networkProfile,
HardwareProfile: &compute.HardwareProfile{
VMSize: compute.VirtualMachineSizeTypes(size),
HardwareProfile: &armcompute.HardwareProfile{
VMSize: &vmSize,
},
}

testVM := compute.VirtualMachineScaleSetVM{
ID: &id,
Name: &name,
Type: &vmType,
Location: &location,
Tags: tags,
VirtualMachineScaleSetVMProperties: properties,
testVM := armcompute.VirtualMachineScaleSetVM{
ID: &id,
Name: &name,
Type: &vmType,
Location: &location,
Tags: tags,
Properties: properties,
}

scaleSet := "testSet"
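A side effect visible throughout these fixtures is that scalar fields such as `VMSize` and `OSType` are now pointers, which is why the tests declare `vmSize` and `osType` variables just to take their addresses. A small generic helper removes that boilerplate; the sketch below defines its own `ptr` (the azcore module ships a comparable `to.Ptr` helper, if the SDK's own is preferred):

``` go
package main

import "fmt"

// ptr returns a pointer to any value, keeping pointer-heavy SDK literals compact.
func ptr[T any](v T) *T { return &v }

// hardwareProfile stands in for armcompute.HardwareProfile (illustration only).
type hardwareProfile struct{ VMSize *string }

func main() {
	hp := hardwareProfile{VMSize: ptr("Standard_D2s_v3")} // no throwaway variable needed
	fmt.Println(*hp.VMSize)
}
```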
@@ -242,18 +251,26 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) {

func TestNewAzureResourceFromID(t *testing.T) {
for _, tc := range []struct {
id string
expected azureResource
expected *arm.ResourceID
}{
{
id: "/a/b/c/group/d/e/f/name",
expected: azureResource{"name", "group"},
id: "/subscriptions/SUBSCRIPTION_ID/resourceGroups/group/providers/PROVIDER/TYPE/name",
expected: &arm.ResourceID{
Name: "name",
ResourceGroupName: "group",
},
},
{
id: "/a/b/c/group/d/e/f/name/g/h",
expected: azureResource{"name", "group"},
id: "/subscriptions/SUBSCRIPTION_ID/resourceGroups/group/providers/PROVIDER/TYPE/name/TYPE/h",
expected: &arm.ResourceID{
Name: "h",
ResourceGroupName: "group",
},
},
} {
actual, _ := newAzureResourceFromID(tc.id, nil)
require.Equal(t, tc.expected, actual)
actual, err := newAzureResourceFromID(tc.id, nil)
require.Nil(t, err)
require.Equal(t, tc.expected.Name, actual.Name)
require.Equal(t, tc.expected.ResourceGroupName, actual.ResourceGroupName)
}
}
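The stricter IDs in the updated cases reflect the new parser: assuming `newAzureResourceFromID` now delegates to the SDK's `arm.ParseResourceID`, only fully qualified ARM IDs parse, whereas the old hand-rolled splitter accepted any path with enough segments. A sketch of the underlying call:

``` go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
)

func main() {
	// A well-formed ARM resource ID; loose forms such as /a/b/c/group/d/e/f/name are rejected.
	id := "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group/providers/Microsoft.Network/networkInterfaces/nic-1"
	r, err := arm.ParseResourceID(id)
	if err != nil {
		panic(err)
	}
	fmt.Println(r.ResourceGroupName, r.Name) // group nic-1
}
```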
@@ -61,6 +61,13 @@ global:

# How long until a scrape request times out.
[ scrape_timeout: <duration> | default = 10s ]

# The protocols to negotiate during a scrape with the client.
# Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
# OpenMetricsText1.0.0, PrometheusText0.0.4.
# The default value changes to [ PrometheusProto, OpenMetricsText1.0.0, OpenMetricsText0.0.1, PrometheusText0.0.4 ]
# when the native-histograms feature flag is set.
[ scrape_protocols: [<string>, ...] | default = [ OpenMetricsText1.0.0, OpenMetricsText0.0.1, PrometheusText0.0.4 ] ]

# How frequently to evaluate rules.
[ evaluation_interval: <duration> | default = 1m ]
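Taken together, a minimal global section that prefers the protobuf format (useful with native histograms) while keeping the text formats as fallbacks could look like the sketch below; the values come straight from the supported list above:

``` yaml
global:
  scrape_interval: 15s
  # First protocol that both Prometheus and the target support wins.
  scrape_protocols: [ PrometheusProto, OpenMetricsText1.0.0, OpenMetricsText0.0.1, PrometheusText0.0.4 ]
```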
@@ -171,6 +178,11 @@ job_name: <job_name>

# Per-scrape timeout when scraping this job.
[ scrape_timeout: <duration> | default = <global_config.scrape_timeout> ]

# The protocols to negotiate during a scrape with the client.
# Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
# OpenMetricsText1.0.0, PrometheusText0.0.4.
[ scrape_protocols: [<string>, ...] | default = <global_config.scrape_protocols> ]

# Whether to scrape a classic histogram that is also exposed as a native
# histogram (has no effect without --enable-feature=native-histograms).
[ scrape_classic_histograms: <boolean> | default = false ]
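Since the per-job list overrides the global one, a hypothetical configuration could pin a single protocol for a target that only speaks the plain text format, and enable classic histograms for another:

``` yaml
scrape_configs:
  # Hypothetical job names, for illustration only.
  - job_name: text-only
    scrape_protocols: [ PrometheusText0.0.4 ]
  - job_name: dual-histograms
    scrape_classic_histograms: true  # no effect without --enable-feature=native-histograms
```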
@@ -39,7 +39,7 @@ tests:

``` yaml
# Series data
interval: <duration>
[ interval: <duration> | default = evaluation_interval ]
input_series:
[ - <series> ]
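# With `interval` now optional, a test group can rely on the file-level
# evaluation_interval to space its samples. A minimal sketch (the series,
# values, and timings below are hypothetical, chosen to show the default):
evaluation_interval: 30s
tests:
  - input_series:
      - series: 'requests_total{job="demo"}'
        values: '0+10x3'    # four samples, at t=0, 30s, 60s and 90s
    promql_expr_test:
      - expr: 'requests_total{job="demo"}'
        eval_time: 1m
        exp_samples:
          - value: 20
            labels: 'requests_total{job="demo"}'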
@@ -80,7 +80,7 @@ series: <string>

# Read this as series starts at a, then n further samples incrementing by b.
# 'a-bxn' becomes 'a a-b a-(2*b) a-(3*b) … a-(n*b)'
# Read this as series starts at a, then n further samples decrementing by b (or incrementing by negative b).
# 'axn' becomes 'a a a … a' (n times) - it's a shorthand for 'a+0xn'
# 'axn' becomes 'a a a … a' (the value a repeated n+1 times) - it's a shorthand for 'a+0xn'
# There are special values to indicate missing and stale samples:
# '_' represents a missing sample from scrape
# 'stale' indicates a stale sample
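# A few concrete expansions of the notation, following the n+1 reading above
# (the series names are placeholders):
- series: 'a'
  values: '2+1x3'     # expands to: 2 3 4 5
- series: 'b'
  values: '10-2x2'    # expands to: 10 8 6
- series: 'c'
  values: '5x4'       # shorthand for 5+0x4, expands to: 5 5 5 5 5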
@@ -10,20 +10,20 @@ require (
github.com/influxdata/influxdb v1.11.2
github.com/prometheus/client_golang v1.17.0
github.com/prometheus/common v0.44.0
github.com/prometheus/prometheus v0.45.0
github.com/prometheus/prometheus v0.47.2
github.com/stretchr/testify v1.8.4
)

require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
github.com/aws/aws-sdk-go v1.44.276 // indirect
github.com/aws/aws-sdk-go v1.44.302 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dennwc/varint v1.0.0 // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect

@@ -35,31 +35,39 @@ require (
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.16.7 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/prometheus/procfs v0.11.1 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 // indirect
go.opentelemetry.io/collector/semconv v0.81.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 // indirect
go.opentelemetry.io/otel v1.16.0 // indirect
go.opentelemetry.io/otel/metric v1.16.0 // indirect
go.opentelemetry.io/otel/trace v1.16.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/goleak v1.2.1 // indirect
golang.org/x/crypto v0.8.0 // indirect
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/oauth2 v0.8.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/oauth2 v0.10.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/text v0.11.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 // indirect
google.golang.org/grpc v1.56.2 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

@@ -1,8 +1,8 @@
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 h1:gVXuXcWd1i4C2Ruxe321aU+IKGaStvGB/S90PUPB/W8=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1/go.mod h1:DffdKW9RFqa5VgmsjUOsS7UE7eiA5iAvYUs63bhKQ0M=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 h1:T8quHYlUGyb/oqtSTwqlCr1ilJHrDv+ZtpSfo+hm1BU=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1/go.mod h1:gLa1CL2RNE4s7M3yopJ/p0iq5DdY6Yv5ZUt9MTRZOQM=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=

@@ -13,9 +13,9 @@ github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+X
github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 h1:oPdPEZFSbl7oSPEAIPMPBMUmiL+mqgzBJwM/9qYcwNg=
github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1/go.mod h1:4qFor3D/HDsvBME35Xy9rwW9DecL+M2sNw1ybjPtwA0=
github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY=
github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/alecthomas/kingpin/v2 v2.3.2 h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU=
github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=

@@ -27,8 +27,8 @@ github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAu
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.44.276 h1:ywPlx9C5Yc482dUgAZ9bHpQ6onVvJvYE9FJWsNDCEy0=
github.com/aws/aws-sdk-go v1.44.276/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.44.302 h1:ST3ko6GrJKn3Xi+nAvxjG3uk/V1pW8KC52WLeIxqqNk=
github.com/aws/aws-sdk-go v1.44.302/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=

@@ -36,24 +36,25 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74 h1:zlUubfBUxApscKFsF4VSvvfhsBNTBu0eF/ddvpo96yk=
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/digitalocean/godo v1.99.0 h1:gUHO7n9bDaZFWvbzOum4bXE0/09ZuYA9yA8idQHX57E=
github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/docker v24.0.2+incompatible h1:eATx+oLz9WdNVkQrr0qjQ8HvRJ4bOOxfzEo8R+dA3cg=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
github.com/docker/docker v24.0.4+incompatible h1:s/LVDftw9hjblvqIeTiGYXBCD95nOEEl7qRsRrIOuQI=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ=
github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o=
github.com/envoyproxy/protoc-gen-validate v1.0.1 h1:kt9FtLiooDc0vbwTLhdg3dyNX1K9Qwa1EK9LcD4jVUQ=
github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE=
github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM=
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=

@@ -72,9 +73,9 @@ github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=

@@ -110,27 +111,27 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gophercloud/gophercloud v1.4.0 h1:RqEu43vaX0lb0LanZr5BylK5ICVxjpFFoc0sxivyuHU=
github.com/gophercloud/gophercloud v1.5.0 h1:cDN6XFCLKiiqvYpjQLq9AiM7RDRbIC9450WpPH+yvXo=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
github.com/hashicorp/consul/api v1.21.0 h1:WMR2JiyuaQWRAMFaOGiYfY4Q4HRpyYRe/oYQofjyduM=
github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c=
github.com/hashicorp/consul/api v1.22.0 h1:ydEvDooB/A0c/xpsBd8GSt7P2/zYPBui4KrNip0xGjE=
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I=
github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0=
github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
github.com/hashicorp/nomad/api v0.0.0-20230605233119-67e39d5d248f h1:yxjcAZRuYymIDC0W4IQHgTe9EQdu2BsjPlVmKwyVZT4=
github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e h1:sr4lujmn9heD030xx/Pd4B/JSmvRhFzuotNXaaV0WLs=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hetznercloud/hcloud-go v1.45.1 h1:nl0OOklFfQT5J6AaNIOhl5Ruh3fhmGmhvZEqHbibVuk=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/hetznercloud/hcloud-go/v2 v2.0.0 h1:Sg1DJ+MAKvbYAqaBaq9tPbwXBS2ckPIaMtVdUjKu+4g=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/influxdata/influxdb v1.11.2 h1:qOF3uQN1mDfJNEKwbAgJsqehf8IXgKok2vlGm736oGo=
github.com/influxdata/influxdb v1.11.2/go.mod h1:eUMkLTE2vQwvSk6KGMrTBLKPaqSuczuelGbggigMPFw=
github.com/ionos-cloud/sdk-go/v6 v6.1.7 h1:uVG1Q/ZDJ7YmCI9Oevpue9xJEH5UrUMyXv8gm7NTxIw=
github.com/ionos-cloud/sdk-go/v6 v6.1.8 h1:493wE/BkZxJf7x79UCE0cYGPZoqQcPiEBALvt7uVGY0=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=

@@ -142,10 +143,13 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=

@@ -158,14 +162,14 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/linode/linodego v1.17.0 h1:aWS98f0jUoY2lhsEuBxRdVkqyGM0nazPd68AEDF0EvU=
github.com/linode/linodego v1.19.0 h1:n4WJrcr9+30e9JGZ6DI0nZbm5SdAj1kSwvvt/998YUw=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI=
github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=

@@ -174,6 +178,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=

@@ -188,8 +193,9 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=

@@ -215,10 +221,10 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
github.com/prometheus/prometheus v0.45.0 h1:O/uG+Nw4kNxx/jDPxmjsSDd+9Ohql6E7ZSY1x5x/0KI=
github.com/prometheus/prometheus v0.45.0/go.mod h1:jC5hyO8ItJBnDWGecbEucMyXjzxGv9cxsxsjS9u5s1w=
github.com/prometheus/prometheus v0.47.2 h1:jWcnuQHz1o1Wu3MZ6nMJDuTI0kU5yJp9pkxh8XEkNvI=
github.com/prometheus/prometheus v0.47.2/go.mod h1:J/bmOSjgH7lFxz2gZhrWEZs2i64vMS+HIuZfmYNhJ/M=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.17 h1:1WuWJu7/e8SqK+uQl7lfk/N/oMZTL2NE/TJsNKRNMc4=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20 h1:a9hSJdJcd16e0HoMsnFvaHvxB3pxSD+SC7+CISp7xY0=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=

@@ -238,6 +244,10 @@ github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtX
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 h1:iT5qH0NLmkGeIdDtnBogYDx7L58t6CaWGL378DEo2QY=
go.opentelemetry.io/collector/pdata v1.0.0-rcv0014/go.mod h1:BRvDrx43kiSoUx3mr7SoA7h9B8+OY99mUK+CZSQFWW4=
go.opentelemetry.io/collector/semconv v0.81.0 h1:lCYNNo3powDvFIaTPP2jDKIrBiV1T92NK4QgL/aHYXw=
go.opentelemetry.io/collector/semconv v0.81.0/go.mod h1:TlYPtzvsXyHOgr5eATi43qEMqwSmIziivJB2uctKswo=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0/go.mod h1:XiYsayHc36K3EByOO6nbAXnAWbrUxdjUROCEeeROOH8=
go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s=

@@ -249,20 +259,21 @@ go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLk
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc=
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw=
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=

@@ -279,12 +290,12 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8=
golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8=
golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

@@ -317,15 +328,15 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

@@ -333,7 +344,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

@@ -342,8 +353,12 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 h1:khxVcsk/FhnzxMKOyD+TDGwjbEOpcPuIpmafPGFmhMA=
google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag=
google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 h1:+VoAg+OKmWaommL56xmZSE2sUK8A7m6SUO7X89F2tbw=
google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 h1:lCbbUxUDD+DiXx9Q6F/ttL0aAu7N2pz8XnmMm8ZW4NE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 h1:XUODHrpzJEUeWmVo/jfNTLj0YyVveOo28oE6vkFbkO4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI=
google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

@@ -371,13 +386,13 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ=
k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ=
k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI=
k8s.io/api v0.27.3 h1:yR6oQXXnUEBWEWcvPWS0jQL575KoAboQPfJAuKNrw5Y=
k8s.io/apimachinery v0.27.3 h1:Ubye8oBufD04l9QnNtW05idcOe9Z3GQN8+7PqmuVcUM=
k8s.io/client-go v0.27.3 h1:7dnEGHZEJld3lYwxvLl7WoehK6lAq7GvgjxpA3nv1E8=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg=
k8s.io/utils v0.0.0-20230308161112-d77c459e9343 h1:m7tbIjXGcGIAtpmQr7/NAi7RsWoW3E7Zcm4jI1HicTc=
k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515 h1:OmK1d0WrkD3IPfkskvroRykOulHVHf0s0ZIFRjyt+UI=
k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
127
go.mod
@ -3,18 +3,17 @@ module github.com/prometheus/prometheus
|
|||
go 1.20
|
||||
|
||||
require (
|
||||
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0
|
||||
github.com/Azure/go-autorest/autorest v0.11.29
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.23
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1
|
||||
github.com/alecthomas/kingpin/v2 v2.3.2
|
||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
|
||||
github.com/aws/aws-sdk-go v1.44.317
|
||||
github.com/aws/aws-sdk-go v1.45.25
|
||||
github.com/cespare/xxhash/v2 v2.2.0
|
||||
github.com/dennwc/varint v1.0.0
|
||||
github.com/digitalocean/godo v1.99.0
|
||||
github.com/docker/docker v24.0.4+incompatible
|
||||
github.com/digitalocean/godo v1.104.1
|
||||
github.com/docker/docker v24.0.6+incompatible
|
||||
github.com/edsrzf/mmap-go v1.1.0
|
||||
github.com/envoyproxy/go-control-plane v0.11.1
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.2
|
||||
|
@ -25,92 +24,91 @@ require (
|
|||
github.com/go-zookeeper/zk v1.0.3
|
||||
github.com/gogo/protobuf v1.3.2
|
||||
github.com/golang/snappy v0.0.4
|
||||
github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8
|
||||
github.com/gophercloud/gophercloud v1.5.0
|
||||
github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98
|
||||
github.com/google/uuid v1.3.1
|
||||
github.com/gophercloud/gophercloud v1.7.0
|
||||
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0
|
||||
github.com/hashicorp/consul/api v1.25.1
|
||||
github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.0.0
|
||||
github.com/ionos-cloud/sdk-go/v6 v6.1.8
|
||||
github.com/hashicorp/nomad/api v0.0.0-20230721134942-515895c7690c
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.4.0
|
||||
github.com/ionos-cloud/sdk-go/v6 v6.1.9
|
||||
github.com/json-iterator/go v1.1.12
|
||||
github.com/klauspost/compress v1.16.7
|
||||
github.com/klauspost/compress v1.17.1
|
||||
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
|
||||
github.com/linode/linodego v1.19.0
|
||||
github.com/miekg/dns v1.1.55
|
||||
github.com/linode/linodego v1.23.0
|
||||
github.com/miekg/dns v1.1.56
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
|
||||
github.com/oklog/run v1.1.0
|
||||
github.com/oklog/ulid v1.3.1
|
||||
github.com/ovh/go-ovh v1.4.1
|
||||
github.com/ovh/go-ovh v1.4.3
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/alertmanager v0.26.0
|
||||
github.com/prometheus/client_golang v1.16.0
|
||||
github.com/prometheus/client_model v0.4.0
|
||||
github.com/prometheus/common v0.44.0
|
||||
github.com/prometheus/client_golang v1.17.0
|
||||
github.com/prometheus/client_model v0.5.0
|
||||
github.com/prometheus/common v0.45.0
|
||||
github.com/prometheus/common/assets v0.2.0
|
||||
github.com/prometheus/common/sigv4 v0.1.0
|
||||
github.com/prometheus/exporter-toolkit v0.10.0
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.21
|
||||
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
|
||||
github.com/stretchr/testify v1.8.4
|
||||
github.com/vultr/govultr/v2 v2.17.2
|
||||
go.opentelemetry.io/collector/pdata v1.0.0-rcv0014
|
||||
go.opentelemetry.io/collector/semconv v0.81.0
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0
|
||||
go.opentelemetry.io/otel v1.16.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0
|
||||
go.opentelemetry.io/otel/sdk v1.16.0
|
||||
go.opentelemetry.io/otel/trace v1.16.0
|
||||
go.opentelemetry.io/collector/pdata v1.0.0-rcv0016
|
||||
go.opentelemetry.io/collector/semconv v0.87.0
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0
|
||||
go.opentelemetry.io/otel v1.19.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0
|
||||
go.opentelemetry.io/otel/sdk v1.19.0
|
||||
go.opentelemetry.io/otel/trace v1.19.0
|
||||
go.uber.org/atomic v1.11.0
|
||||
go.uber.org/automaxprocs v1.5.2
|
||||
go.uber.org/automaxprocs v1.5.3
|
||||
go.uber.org/goleak v1.2.1
|
||||
go.uber.org/multierr v1.11.0
|
||||
golang.org/x/net v0.13.0
|
||||
golang.org/x/oauth2 v0.10.0
|
||||
golang.org/x/sync v0.3.0
|
||||
golang.org/x/sys v0.10.0
|
||||
golang.org/x/exp v0.0.0-20231006140011-7918f672742d
|
||||
golang.org/x/net v0.17.0
|
||||
golang.org/x/oauth2 v0.13.0
|
||||
golang.org/x/sync v0.4.0
|
||||
golang.org/x/sys v0.13.0
|
||||
golang.org/x/time v0.3.0
|
||||
golang.org/x/tools v0.11.0
|
||||
google.golang.org/api v0.132.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753
|
||||
google.golang.org/grpc v1.56.2
|
||||
golang.org/x/tools v0.14.0
|
||||
google.golang.org/api v0.147.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a
|
||||
google.golang.org/grpc v1.58.3
|
||||
google.golang.org/protobuf v1.31.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
k8s.io/api v0.28.1
|
||||
k8s.io/apimachinery v0.28.1
|
||||
k8s.io/client-go v0.28.1
|
||||
k8s.io/api v0.28.2
|
||||
k8s.io/apimachinery v0.28.2
|
||||
k8s.io/client-go v0.28.2
|
||||
k8s.io/klog v1.0.0
|
||||
k8s.io/klog/v2 v2.100.1
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go/compute v1.23.0 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.2.3 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.1.0 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.0.0 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/s2a-go v0.1.4 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
github.com/stretchr/objx v0.5.0 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 // indirect
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c // indirect
)

require (
cloud.google.com/go/compute v1.22.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
@@ -138,15 +136,13 @@ require (
github.com/go-openapi/swag v0.22.4 // indirect
github.com/go-openapi/validate v0.22.1 // indirect
github.com/go-resty/resty/v2 v2.7.0 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang/glog v1.1.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.3.0
github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.1 // indirect
github.com/googleapis/gax-go/v2 v2.12.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
@@ -166,29 +162,26 @@ require (
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/procfs v0.11.0 // indirect
github.com/prometheus/procfs v0.11.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
go.mongodb.org/mongo-driver v1.12.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect
go.opentelemetry.io/otel/metric v1.16.0 // indirect
go.opentelemetry.io/otel/metric v1.19.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b
golang.org/x/mod v0.12.0 // indirect
golang.org/x/term v0.10.0 // indirect
golang.org/x/text v0.11.0 // indirect
golang.org/x/crypto v0.14.0 // indirect
golang.org/x/mod v0.13.0 // indirect
golang.org/x/term v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
275 go.sum
@@ -18,8 +18,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v1.22.0 h1:cB8R6FtUtT1TYGl5R3xuxnW6OUIc/DrT2aiR16TTG7Y=
cloud.google.com/go/compute v1.22.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
@@ -34,38 +34,24 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw=
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 h1:9kDVnTz3vbfweTqAUmk/a/pH5pWFCHtvRpHYC0G/dcA=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 h1:UPeCRD+XY7QlaGQte2EVI2iOcWvUYA2XY8w5T/8v0NQ=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1/go.mod h1:oGV6NlB0cvi1ZbYRR2UN44QHxWFyGk+iylgD0qaMXjA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.1.0 h1:QM6sE5k2ZT/vI5BEe0r7mqjsUSnhVBFbOsVkEuaEfiA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.1.0/go.mod h1:243D9iHbcQXoFUtgHJwL7gl2zx1aDuDMjvBZVGr2uW0=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1 h1:bWh0Z2rOEDfB/ywv/l0iHN1JgyazE6kW/aIA89+CEK0=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1/go.mod h1:Bzf34hhAE9NSxailk8xVeLEZbUjOXcC+GnU1mMKdhLw=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0 h1:ECsQtyERDVz3NP3kvDOTLvbQhqWp/x9EsGKtb4ogUr8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw=
github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs=
github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8=
github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=
github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY=
github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o=
github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk=
github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
@@ -103,8 +89,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.44.317 h1:+8XWrLmGMwPPXSRSLPzhgcGnzJ2mYkgkrcB9C/GnSOU=
github.com/aws/aws-sdk-go v1.44.317/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.45.25 h1:c4fLlh5sLdK2DCRTY1z0hyuJZU4ygxX8m1FswL6/nF4=
github.com/aws/aws-sdk-go v1.45.25/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -127,11 +113,6 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
@@ -152,13 +133,13 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/digitalocean/godo v1.99.0 h1:gUHO7n9bDaZFWvbzOum4bXE0/09ZuYA9yA8idQHX57E=
github.com/digitalocean/godo v1.99.0/go.mod h1:SsS2oXo2rznfM/nORlZ/6JaUJZFhmKTib1YhopUc8NA=
github.com/digitalocean/godo v1.104.1 h1:SZNxjAsskM/su0YW9P8Wx3gU0W1Z13b6tZlYNpl5BnA=
github.com/digitalocean/godo v1.104.1/go.mod h1:VAI/L5YDzMuPRU01lEEUSQ/sp5Z//1HnnFv/RBTEdbg=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v24.0.4+incompatible h1:s/LVDftw9hjblvqIeTiGYXBCD95nOEEl7qRsRrIOuQI=
github.com/docker/docker v24.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE=
github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -176,8 +157,6 @@ github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4s
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM=
github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
@@ -295,9 +274,8 @@ github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE=
github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
@@ -329,7 +307,6 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -352,8 +329,9 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -368,24 +346,24 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 h1:n6vlPhxsA+BW/XsS5+uqi7GyzaLa5MH7qlSLBZtRdiA=
github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA=
github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ=
github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM=
github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w=
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.1 h1:SBWmZhjUDRorQxrN0nwzf+AHBxnbFjViHQS4P0yVpmQ=
github.com/googleapis/enterprise-certificate-proxy v0.3.1/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
github.com/gophercloud/gophercloud v1.5.0 h1:cDN6XFCLKiiqvYpjQLq9AiM7RDRbIC9450WpPH+yvXo=
github.com/gophercloud/gophercloud v1.5.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gophercloud/gophercloud v1.7.0 h1:fyJGKh0LBvIZKLvBWvQdIgkaV5yTM3Jh9EYUh+UNCAs=
github.com/gophercloud/gophercloud v1.7.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@@ -453,13 +431,13 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e h1:sr4lujmn9heD030xx/Pd4B/JSmvRhFzuotNXaaV0WLs=
github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e/go.mod h1:O23qLAZuCx4htdY9zBaO4cJPXgleSFEdq6D/sezGgYE=
github.com/hashicorp/nomad/api v0.0.0-20230721134942-515895c7690c h1:Nc3Mt2BAnq0/VoLEntF/nipX+K1S7pG+RgwiitSv6v0=
github.com/hashicorp/nomad/api v0.0.0-20230721134942-515895c7690c/go.mod h1:O23qLAZuCx4htdY9zBaO4cJPXgleSFEdq6D/sezGgYE=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go/v2 v2.0.0 h1:Sg1DJ+MAKvbYAqaBaq9tPbwXBS2ckPIaMtVdUjKu+4g=
github.com/hetznercloud/hcloud-go/v2 v2.0.0/go.mod h1:4iUG2NG8b61IAwNx6UsMWQ6IfIf/i1RsG0BbsKAyR5Q=
github.com/hetznercloud/hcloud-go/v2 v2.4.0 h1:MqlAE+w125PLvJRCpAJmEwrIxoVdUdOyuFUhE/Ukbok=
github.com/hetznercloud/hcloud-go/v2 v2.4.0/go.mod h1:l7fA5xsncFBzQTyw29/dw5Yr88yEGKKdc6BHf24ONS0=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -467,8 +445,8 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/ionos-cloud/sdk-go/v6 v6.1.8 h1:493wE/BkZxJf7x79UCE0cYGPZoqQcPiEBALvt7uVGY0=
github.com/ionos-cloud/sdk-go/v6 v6.1.8/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
github.com/ionos-cloud/sdk-go/v6 v6.1.9 h1:Iq3VIXzeEbc8EbButuACgfLMiY5TPVWUPNrF+Vsddo4=
github.com/ionos-cloud/sdk-go/v6 v6.1.9/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
@@ -501,8 +479,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.17.1 h1:NE3C767s2ak2bweCZo3+rdP4U/HoyVXLv/X9f2gPS5g=
github.com/klauspost/compress v1.17.1/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -520,8 +498,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linode/linodego v1.19.0 h1:n4WJrcr9+30e9JGZ6DI0nZbm5SdAj1kSwvvt/998YUw=
github.com/linode/linodego v1.19.0/go.mod h1:XZFR+yJ9mm2kwf6itZ6SCpu+6w3KnIevV0Uu5HNWJgQ=
github.com/linode/linodego v1.23.0 h1:s0ReCZtuN9Z1IoUN9w1RLeYO1dMZUGPwOQ/IBFsBHtU=
github.com/linode/linodego v1.23.0/go.mod h1:0U7wj/UQOqBNbKv1FYTXiBUXueR8DY4HvIotwE0ENgg=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -548,14 +526,14 @@ github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APP
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo=
github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE=
github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -622,8 +600,8 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM=
github.com/ovh/go-ovh v1.4.1/go.mod h1:6bL6pPyUT7tBfI0pqOegJgRjgjuO+mOo+MyXd1EEC0M=
github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0=
github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8SY=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
@@ -655,16 +633,16 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
@@ -672,8 +650,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
@@ -686,8 +664,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuRbyk=
github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
@@ -698,8 +676,8 @@ github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDN
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20 h1:a9hSJdJcd16e0HoMsnFvaHvxB3pxSD+SC7+CISp7xY0=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.21 h1:yWfiTPwYxB0l5fGMhl/G+liULugVIHD9AU77iNLrURQ=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.21/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shoenig/test v0.6.6 h1:Oe8TPH9wAbv++YPNDKJWUnI8Q4PPWCx3UbOfH+FxiMU=
@@ -735,14 +713,12 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
@@ -784,37 +760,34 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 h1:iT5qH0NLmkGeIdDtnBogYDx7L58t6CaWGL378DEo2QY=
go.opentelemetry.io/collector/pdata v1.0.0-rcv0014/go.mod h1:BRvDrx43kiSoUx3mr7SoA7h9B8+OY99mUK+CZSQFWW4=
go.opentelemetry.io/collector/semconv v0.81.0 h1:lCYNNo3powDvFIaTPP2jDKIrBiV1T92NK4QgL/aHYXw=
go.opentelemetry.io/collector/semconv v0.81.0/go.mod h1:TlYPtzvsXyHOgr5eATi43qEMqwSmIziivJB2uctKswo=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0/go.mod h1:XiYsayHc36K3EByOO6nbAXnAWbrUxdjUROCEeeROOH8=
go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s=
go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 h1:t4ZwRPU+emrcvM2e9DHd0Fsf0JTPVcbfa/BhTDF03d0=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0/go.mod h1:vLarbg68dH2Wa77g71zmKQqlQ8+8Rq3GRG31uc0WcWI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 h1:cbsD4cUcviQGXdw8+bo5x2wazq10SKz8hEbtCRPcU78=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0/go.mod h1:JgXSGah17croqhJfhByOLVY719k1emAXC8MVhCIJlRs=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 h1:TVQp/bboR4mhZSav+MdgXB8FaRho1RC8UwVn3T0vjVc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0/go.mod h1:I33vtIe0sR96wfrUcilIzLoA3mLHhRmz9S9Te0S3gDo=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0 h1:iqjq9LAB8aK++sKVcELezzn655JnBNdsDhghU4G/So8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.16.0/go.mod h1:hGXzO5bhhSHZnKvrDaXB82Y9DRFour0Nz/KrBh7reWw=
go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo=
go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4=
go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE=
go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4=
go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs=
go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/collector/pdata v1.0.0-rcv0016 h1:qCPXSQCoD3qeWFb1RuIks8fw9Atxpk78bmtVdi15KhE=
go.opentelemetry.io/collector/pdata v1.0.0-rcv0016/go.mod h1:OdN0alYOlYhHXu6BDlGehrZWgtBuiDsz/rlNeJeXiNg=
go.opentelemetry.io/collector/semconv v0.87.0 h1:BsG1jdLLRCBRlvUujk4QA86af7r/ZXnizczQpEs/gg8=
go.opentelemetry.io/collector/semconv v0.87.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q=
go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A=
go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME=
go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
@@ -836,13 +809,10 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -853,8 +823,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b h1:r+vk0EmXNmekl0S0BascoeeoHk/L7wmaW2QF90K+kYI=
golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -876,8 +846,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -922,17 +892,16 @@ golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY=
golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8=
golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY=
golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -946,8 +915,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1013,16 +982,14 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1034,8 +1001,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1095,8 +1062,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8=
golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1116,8 +1083,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.132.0 h1:8t2/+qZ26kAOGSmOiHwVycqVaDg7q3JDILrNi/Z6rvc=
google.golang.org/api v0.132.0/go.mod h1:AeTBC6GpJnJSRJjktDcPX0QwtS8pGYZOV6MSuSCusw0=
google.golang.org/api v0.147.0 h1:Can3FaQo9LlVqxJCodNmeZW/ib3/qKAY3rFeXiHo5gc=
google.golang.org/api v0.147.0/go.mod h1:pQ/9j83DcmPd/5C9e2nFOdjjNkDZ1G+zkbK2uvdkJMs=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1156,12 +1123,12 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 h1:+VoAg+OKmWaommL56xmZSE2sUK8A7m6SUO7X89F2tbw=
google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753/go.mod h1:iqkVr8IRpZ53gx1dEnWlCUIEwDWqWARWrbzpasaTNYM=
google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 h1:lCbbUxUDD+DiXx9Q6F/ttL0aAu7N2pz8XnmMm8ZW4NE=
google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 h1:XUODHrpzJEUeWmVo/jfNTLj0YyVveOo28oE6vkFbkO4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0=
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk=
google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a h1:myvhA4is3vrit1a6NZCWBIwN0kNEnX21DJOJX/NvIfI=
google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:SUBoKXbI1Efip18FClrQVGjWcyd0QZd8KkvdP34t7ww=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c h1:jHkCUWkseRf+W+edG5hMzr/Uh1xkDREY4caybAq4dpY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@@ -1180,10 +1147,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI=
google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1243,12 +1208,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.28.1 h1:i+0O8k2NPBCPYaMB+uCkseEbawEt/eFaiRqUx8aB108=
k8s.io/api v0.28.1/go.mod h1:uBYwID+66wiL28Kn2tBjBYQdEU0Xk0z5qF8bIBqk/Dg=
k8s.io/apimachinery v0.28.1 h1:EJD40og3GizBSV3mkIoXQBsws32okPOy+MkRyzh6nPY=
k8s.io/apimachinery v0.28.1/go.mod h1:X0xh/chESs2hP9koe+SdIAcXWcQ+RM5hy0ZynB+yEvw=
k8s.io/client-go v0.28.1 h1:pRhMzB8HyLfVwpngWKE8hDcXRqifh1ga2Z/PU9SXVK8=
k8s.io/client-go v0.28.1/go.mod h1:pEZA3FqOsVkCc07pFVzK076R+P/eXqsgx5zuuRWukNE=
k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw=
k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg=
k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ=
k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU=
k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY=
k8s.io/client-go v0.28.2/go.mod h1:sMkApowspLuc7omj1FOSUxSoqjr+d5Q0Yc0LOFnYFJY=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc=
@@ -307,13 +307,17 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram {
// Exact match is when there are no new buckets (even empty) and no missing buckets,
// and all the bucket values match. Spans can have different empty length spans in between,
// but they must represent the same bucket layout to match.
// Sum, Count, ZeroCount and bucket values are compared based on their bit patterns
// because this method is about data equality rather than mathematical equality.
func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool {
    if h2 == nil {
        return false
    }

    if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold ||
        h.ZeroCount != h2.ZeroCount || h.Count != h2.Count || h.Sum != h2.Sum {
        math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) ||
        math.Float64bits(h.Count) != math.Float64bits(h2.Count) ||
        math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) {
        return false
    }
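The point of moving from plain == to math.Float64bits comparisons is NaN handling, which is easiest to see in isolation. A minimal, self-contained sketch; the 0x7ff0000000000002 constant below stands in for the stale-marker NaN defined in model/value and is used here for illustration only:

package main

import (
    "fmt"
    "math"
)

func main() {
    nan := math.NaN()

    // IEEE 754 semantics: NaN is never equal to itself, so the old
    // comparison could report two bit-identical histograms as unequal.
    fmt.Println(nan == nan) // false

    // Comparing bit patterns treats identical NaNs as equal...
    fmt.Println(math.Float64bits(nan) == math.Float64bits(nan)) // true

    // ...while still telling apart NaNs with different payloads, such as
    // Prometheus' stale marker vs. an ordinary NaN.
    staleNaN := math.Float64frombits(0x7ff0000000000002)
    fmt.Println(math.Float64bits(staleNaN) == math.Float64bits(nan)) // false
}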
@@ -324,16 +328,44 @@ func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool {
        return false
    }

    if !bucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
    if !floatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
        return false
    }
    if !bucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
    if !floatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
        return false
    }

    return true
}

// Size returns the total size of the FloatHistogram, which includes the size of the pointer
// to FloatHistogram, all its fields, and all elements contained in slices.
// NOTE: this is only valid for 64 bit architectures.
func (fh *FloatHistogram) Size() int {
    // Size of each slice separately.
    posSpanSize := len(fh.PositiveSpans) * 8   // 8 bytes (int32 + uint32).
    negSpanSize := len(fh.NegativeSpans) * 8   // 8 bytes (int32 + uint32).
    posBucketSize := len(fh.PositiveBuckets) * 8 // 8 bytes (float64).
    negBucketSize := len(fh.NegativeBuckets) * 8 // 8 bytes (float64).

    // Total size of the struct.

    // fh is 8 bytes.
    // fh.CounterResetHint is 4 bytes (1 byte bool + 3 bytes padding).
    // fh.Schema is 4 bytes.
    // fh.ZeroThreshold is 8 bytes.
    // fh.ZeroCount is 8 bytes.
    // fh.Count is 8 bytes.
    // fh.Sum is 8 bytes.
    // fh.PositiveSpans is 24 bytes.
    // fh.NegativeSpans is 24 bytes.
    // fh.PositiveBuckets is 24 bytes.
    // fh.NegativeBuckets is 24 bytes.
    structSize := 144

    return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize
}
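The hard-coded structSize of 144 can be sanity-checked with unsafe.Sizeof on a 64-bit platform. A small sketch using local stand-in types that mirror the field layout described in the comments above (the stand-ins are illustrative, not the real histogram package types):

package main

import (
    "fmt"
    "unsafe"
)

// Stand-ins mirroring the layout of histogram.Span and histogram.FloatHistogram.
type span struct {
    Offset int32
    Length uint32
}

type floatHistogram struct {
    CounterResetHint byte
    Schema           int32
    ZeroThreshold    float64
    ZeroCount        float64
    Count            float64
    Sum              float64
    PositiveSpans    []span
    NegativeSpans    []span
    PositiveBuckets  []float64
    NegativeBuckets  []float64
}

func main() {
    // 4 (byte + padding) + 4 + 4*8 + 4*24 = 136 bytes for the struct itself.
    fmt.Println(unsafe.Sizeof(floatHistogram{})) // 136
    // Plus the 8-byte pointer that Size() also charges for: 136 + 8 = 144.
    fmt.Println(unsafe.Sizeof(&floatHistogram{})) // 8
}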
// Compact eliminates empty buckets at the beginning and end of each span, then
// merges spans that are consecutive or at most maxEmptyBuckets apart, and
// finally splits spans that contain more consecutive empty buckets than
@@ -1110,3 +1142,15 @@ func addBuckets(

    return spansA, bucketsA
}

func floatBucketsMatch(b1, b2 []float64) bool {
    if len(b1) != len(b2) {
        return false
    }
    for i, b := range b1 {
        if math.Float64bits(b) != math.Float64bits(b2[i]) {
            return false
        }
    }
    return true
}
@@ -2293,6 +2293,108 @@ func TestFloatBucketIteratorTargetSchema(t *testing.T) {
    require.False(t, it.Next(), "negative iterator not exhausted")
}

// TestFloatHistogramEquals tests FloatHistogram with float-specific cases that
// cannot be covered by TestHistogramEquals.
func TestFloatHistogramEquals(t *testing.T) {
    h1 := FloatHistogram{
        Schema:          3,
        Count:           2.2,
        Sum:             9.7,
        ZeroThreshold:   0.1,
        ZeroCount:       1.1,
        PositiveBuckets: []float64{3},
        NegativeBuckets: []float64{4},
    }

    equals := func(h1, h2 FloatHistogram) {
        require.True(t, h1.Equals(&h2))
        require.True(t, h2.Equals(&h1))
    }
    notEquals := func(h1, h2 FloatHistogram) {
        require.False(t, h1.Equals(&h2))
        require.False(t, h2.Equals(&h1))
    }

    h2 := h1.Copy()
    equals(h1, *h2)

    // Count is NaN (but not a StaleNaN).
    hCountNaN := h1.Copy()
    hCountNaN.Count = math.NaN()
    notEquals(h1, *hCountNaN)
    equals(*hCountNaN, *hCountNaN)

    // ZeroCount is NaN (but not a StaleNaN).
    hZeroCountNaN := h1.Copy()
    hZeroCountNaN.ZeroCount = math.NaN()
    notEquals(h1, *hZeroCountNaN)
    equals(*hZeroCountNaN, *hZeroCountNaN)

    // Positive bucket value is NaN.
    hPosBucketNaN := h1.Copy()
    hPosBucketNaN.PositiveBuckets[0] = math.NaN()
    notEquals(h1, *hPosBucketNaN)
    equals(*hPosBucketNaN, *hPosBucketNaN)

    // Negative bucket value is NaN.
    hNegBucketNaN := h1.Copy()
    hNegBucketNaN.NegativeBuckets[0] = math.NaN()
    notEquals(h1, *hNegBucketNaN)
    equals(*hNegBucketNaN, *hNegBucketNaN)
}

func TestFloatHistogramSize(t *testing.T) {
    cases := []struct {
        name     string
        fh       *FloatHistogram
        expected int
    }{
        {
            "without spans and buckets",
            &FloatHistogram{ // 8 bytes.
                CounterResetHint: 0,           // 1 byte.
                Schema:           1,           // 4 bytes.
                ZeroThreshold:    0.01,        // 8 bytes.
                ZeroCount:        5.5,         // 8 bytes.
                Count:            3493.3,      // 8 bytes.
                Sum:              2349209.324, // 8 bytes.
                PositiveSpans:    nil,         // 24 bytes.
                PositiveBuckets:  nil,         // 24 bytes.
                NegativeSpans:    nil,         // 24 bytes.
                NegativeBuckets:  nil,         // 24 bytes.
            },
            8 + 4 + 4 + 8 + 8 + 8 + 8 + 24 + 24 + 24 + 24,
        },
        {
            "complete struct",
            &FloatHistogram{ // 8 bytes.
                CounterResetHint: 0,           // 1 byte.
                Schema:           1,           // 4 bytes.
                ZeroThreshold:    0.01,        // 8 bytes.
                ZeroCount:        5.5,         // 8 bytes.
                Count:            3493.3,      // 8 bytes.
                Sum:              2349209.324, // 8 bytes.
                PositiveSpans: []Span{ // 24 bytes.
                    {-2, 1}, // 2 * 4 bytes.
                    {2, 3},  // 2 * 4 bytes.
                },
                PositiveBuckets: []float64{1, 3.3, 4.2, 0.1}, // 24 bytes + 4 * 8 bytes.
                NegativeSpans: []Span{ // 24 bytes.
                    {3, 2},  // 2 * 4 bytes.
                    {3, 2}}, // 2 * 4 bytes.
                NegativeBuckets: []float64{3.1, 3, 1.234e5, 1000}, // 24 bytes + 4 * 8 bytes.
            },
            8 + 4 + 4 + 8 + 8 + 8 + 8 + (24 + 2*4 + 2*4) + (24 + 2*4 + 2*4) + (24 + 4*8) + (24 + 4*8),
        },
    }

    for _, c := range cases {
        t.Run(c.name, func(t *testing.T) {
            require.Equal(t, c.expected, c.fh.Size())
        })
    }
}

func BenchmarkFloatHistogramAllBucketIterator(b *testing.B) {
    rng := rand.New(rand.NewSource(0))
@@ -347,18 +347,6 @@ func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmp
    return buckets, spans
}

func bucketsMatch[IBC InternalBucketCount](b1, b2 []IBC) bool {
    if len(b1) != len(b2) {
        return false
    }
    for i, b := range b1 {
        if b != b2[i] {
            return false
        }
    }
    return true
}

func getBound(idx, schema int32) float64 {
    // Here a bit of context about the behavior for the last bucket counting
    // regular numbers (called simply "last bucket" below) and the bucket
@@ -17,6 +17,8 @@ import (
    "fmt"
    "math"
    "strings"

    "golang.org/x/exp/slices"
)

// CounterResetHint contains the known information about a counter reset,
@@ -174,13 +176,16 @@ func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] {
// Exact match is when there are no new buckets (even empty) and no missing buckets,
// and all the bucket values match. Spans can have different empty length spans in between,
// but they must represent the same bucket layout to match.
// Sum is compared based on its bit pattern because this method
// is about data equality rather than mathematical equality.
func (h *Histogram) Equals(h2 *Histogram) bool {
    if h2 == nil {
        return false
    }

    if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold ||
        h.ZeroCount != h2.ZeroCount || h.Count != h2.Count || h.Sum != h2.Sum {
        h.ZeroCount != h2.ZeroCount || h.Count != h2.Count ||
        math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) {
        return false
    }
@@ -191,10 +196,10 @@ func (h *Histogram) Equals(h2 *Histogram) bool {
        return false
    }

    if !bucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
    if !slices.Equal(h.PositiveBuckets, h2.PositiveBuckets) {
        return false
    }
    if !bucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
    if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) {
        return false
    }
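For the integer-valued buckets of the plain Histogram, slices.Equal is an exact stand-in for the removed generic bucketsMatch helper; only float buckets need the bit-pattern variant. A short sketch of the distinction:

package main

import (
    "fmt"
    "math"

    "golang.org/x/exp/slices"
)

func main() {
    // Length check plus element-wise ==, just like the old bucketsMatch.
    fmt.Println(slices.Equal([]int64{1, 2, -3}, []int64{1, 2, -3})) // true

    // For float64 buckets this would be wrong in the presence of NaN,
    // because NaN == NaN is false; hence floatBucketsMatch above.
    nan := math.NaN()
    fmt.Println(slices.Equal([]float64{nan}, []float64{nan})) // false
}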
@@ -19,6 +19,8 @@ import (
    "testing"

    "github.com/stretchr/testify/require"

    "github.com/prometheus/prometheus/model/value"
)

func TestHistogramString(t *testing.T) {
@@ -411,8 +413,8 @@ func TestHistogramToFloat(t *testing.T) {
    require.Equal(t, h.String(), fh.String())
}

// TestHistogramMatches tests both Histogram and FloatHistogram.
func TestHistogramMatches(t *testing.T) {
// TestHistogramEquals tests both Histogram and FloatHistogram.
func TestHistogramEquals(t *testing.T) {
    h1 := Histogram{
        Schema: 3,
        Count:  61,
@@ -537,6 +539,21 @@ func TestHistogramMatches(t *testing.T) {
    })
    h2.NegativeBuckets = append(h2.NegativeBuckets, 1)
    notEquals(h1, *h2)

    // Sum is StaleNaN.
    hStale := h1.Copy()
    hStale.Sum = math.Float64frombits(value.StaleNaN)
    notEquals(h1, *hStale)
    equals(*hStale, *hStale)

    // Sum is NaN (but not a StaleNaN).
    hNaN := h1.Copy()
    hNaN.Sum = math.NaN()
    notEquals(h1, *hNaN)
    equals(*hNaN, *hNaN)

    // Sum StaleNaN vs regular NaN.
    notEquals(*hStale, *hNaN)
}

func TestHistogramCompact(t *testing.T) {
@@ -16,6 +16,8 @@ package textparse
import (
    "mime"

    "github.com/gogo/protobuf/types"

    "github.com/prometheus/prometheus/model/exemplar"
    "github.com/prometheus/prometheus/model/histogram"
    "github.com/prometheus/prometheus/model/labels"
@@ -64,6 +66,11 @@ type Parser interface {
    // retrieved (including the case where no exemplars exist at all).
    Exemplar(l *exemplar.Exemplar) bool

    // CreatedTimestamp writes the created timestamp of the current sample
    // into the passed timestamp. It returns false if no created timestamp
    // exists or if the metric type does not support created timestamps.
    CreatedTimestamp(ct *types.Timestamp) bool

    // Next advances the parser to the next sample. It returns false if no
    // more samples were read or an error occurred.
    Next() (Entry, error)
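A hypothetical consumer of the extended interface could look like the sketch below. The consume helper and its logging are illustrative only; the Next and CreatedTimestamp calls follow the interface as declared above:

package main

import (
    "fmt"
    "io"

    "github.com/gogo/protobuf/types"

    "github.com/prometheus/prometheus/model/textparse"
)

// consume drains a Parser, logging created timestamps where the underlying
// format provides them (with this change, only the protobuf parser does).
func consume(p textparse.Parser) error {
    var ct types.Timestamp
    for {
        entry, err := p.Next()
        if err == io.EOF {
            return nil
        }
        if err != nil {
            return err
        }
        if entry != textparse.EntrySeries {
            continue
        }
        if p.CreatedTimestamp(&ct) {
            fmt.Println("created at:", ct.String())
        }
    }
}

func main() {}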
@@ -24,6 +24,8 @@ import (
    "strings"
    "unicode/utf8"

    "github.com/gogo/protobuf/types"

    "github.com/prometheus/prometheus/model/exemplar"
    "github.com/prometheus/prometheus/model/histogram"
    "github.com/prometheus/prometheus/model/labels"
@@ -211,6 +213,11 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool {
    return true
}

// CreatedTimestamp returns false because OpenMetricsParser does not support created timestamps (yet).
func (p *OpenMetricsParser) CreatedTimestamp(_ *types.Timestamp) bool {
    return false
}

// nextToken returns the next token from the openMetricsLexer.
func (p *OpenMetricsParser) nextToken() token {
    tok := p.l.Lex()
@@ -26,6 +26,8 @@ import (
    "unicode/utf8"
    "unsafe"

    "github.com/gogo/protobuf/types"

    "github.com/prometheus/prometheus/model/exemplar"
    "github.com/prometheus/prometheus/model/histogram"
    "github.com/prometheus/prometheus/model/labels"
@@ -245,6 +247,11 @@ func (p *PromParser) Exemplar(*exemplar.Exemplar) bool {
    return false
}

// CreatedTimestamp returns false because PromParser does not support created timestamps.
func (p *PromParser) CreatedTimestamp(_ *types.Timestamp) bool {
    return false
}

// nextToken returns the next token from the promlexer. It skips over tabs
// and spaces.
func (p *PromParser) nextToken() token {
@@ -23,6 +23,7 @@ import (
    "unicode/utf8"

    "github.com/gogo/protobuf/proto"
    "github.com/gogo/protobuf/types"
    "github.com/pkg/errors"
    "github.com/prometheus/common/model"
@@ -147,9 +148,15 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) {
    if ts != 0 {
        return p.metricBytes.Bytes(), &ts, v
    }
    // Nasty hack: Assume that ts==0 means no timestamp. That's not true in
    // general, but proto3 has no distinction between unset and
    // default. Need to avoid in the final format.
    // TODO(beorn7): We assume here that ts==0 means no timestamp. That's
    // not true in general, but proto3 originally has no distinction between
    // unset and default. At a later stage, the `optional` keyword was
    // (re-)introduced in proto3, but gogo-protobuf never got updated to
    // support it. (Note that setting `[(gogoproto.nullable) = true]` for
    // the `timestamp_ms` field doesn't help, either.) We plan to migrate
    // away from gogo-protobuf to an actively maintained protobuf
    // implementation. Once that's done, we can simply use the `optional`
    // keyword and check for the unset state explicitly.
    return p.metricBytes.Bytes(), nil, v
}
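The ambiguity the TODO describes can be shown directly: a proto3 scalar set to its zero value is indistinguishable on the wire from one that was never set. A sketch using the gogo runtime and the dto types from this repository (the import alias follows how the parser refers to this package):

package main

import (
    "bytes"
    "fmt"

    "github.com/gogo/protobuf/proto"

    dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
)

func main() {
    explicitZero, _ := proto.Marshal(&dto.Metric{TimestampMs: 0})
    unset, _ := proto.Marshal(&dto.Metric{})

    // proto3 omits zero-valued scalars, so both encodings are identical,
    // which is why Series() has to treat ts==0 as "no timestamp".
    fmt.Println(bytes.Equal(explicitZero, unset)) // true
}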
@@ -347,6 +354,24 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool {
    return true
}

func (p *ProtobufParser) CreatedTimestamp(ct *types.Timestamp) bool {
    var foundCT *types.Timestamp
    switch p.mf.GetType() {
    case dto.MetricType_COUNTER:
        foundCT = p.mf.GetMetric()[p.metricPos].GetCounter().GetCreatedTimestamp()
    case dto.MetricType_SUMMARY:
        foundCT = p.mf.GetMetric()[p.metricPos].GetSummary().GetCreatedTimestamp()
    case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
        foundCT = p.mf.GetMetric()[p.metricPos].GetHistogram().GetCreatedTimestamp()
    default:
    }
    if foundCT == nil {
        return false
    }
    *ct = *foundCT
    return true
}

// Next advances the parser to the next "sample" (emulating the behavior of a
// text format parser). It returns (EntryInvalid, io.EOF) if no samples were
// read.
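The chained GetMetric()[…].GetCounter().GetCreatedTimestamp() lookups above are safe even when a metric family carries no counter, because gogo-generated getters tolerate nil receivers, as the generated code later in this diff shows. A tiny illustration (same dto import alias as the parser uses):

package main

import (
    "fmt"

    dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
)

func main() {
    // Called on a nil receiver, generated getters return the zero value
    // instead of panicking, so the chain cannot dereference a nil Counter.
    var c *dto.Counter // deliberately nil
    fmt.Println(c.GetCreatedTimestamp() == nil) // true
}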
@@ -21,6 +21,7 @@ import (
    "testing"

    "github.com/gogo/protobuf/proto"
    "github.com/gogo/protobuf/types"
    "github.com/stretchr/testify/require"

    "github.com/prometheus/prometheus/model/exemplar"
@@ -530,6 +531,69 @@ metric: <
>
>

`,
`name: "test_counter_with_createdtimestamp"
help: "A counter with a created timestamp."
type: COUNTER
metric: <
  counter: <
    value: 42
    created_timestamp: <
      seconds: 1
      nanos: 1
    >
  >
>

`,
`name: "test_summary_with_createdtimestamp"
help: "A summary with a created timestamp."
type: SUMMARY
metric: <
  summary: <
    sample_count: 42
    sample_sum: 1.234
    created_timestamp: <
      seconds: 1
      nanos: 1
    >
  >
>

`,
`name: "test_histogram_with_createdtimestamp"
help: "A histogram with a created timestamp."
type: HISTOGRAM
metric: <
  histogram: <
    created_timestamp: <
      seconds: 1
      nanos: 1
    >
    positive_span: <
      offset: 0
      length: 0
    >
  >
>

`,
`name: "test_gaugehistogram_with_createdtimestamp"
help: "A gauge histogram with a created timestamp."
type: GAUGE_HISTOGRAM
metric: <
  histogram: <
    created_timestamp: <
      seconds: 1
      nanos: 1
    >
    positive_span: <
      offset: 0
      length: 0
    >
  >
>

`,
}
@@ -566,6 +630,7 @@ func TestProtobufParse(t *testing.T) {
    shs  *histogram.Histogram
    fhs  *histogram.FloatHistogram
    e    []exemplar.Exemplar
    ct   *types.Timestamp
}

inputBuf := createTestProtoBuf(t)
@@ -997,6 +1062,86 @@ func TestProtobufParse(t *testing.T) {
        "__name__", "empty_histogram",
    ),
},
{
    m:    "test_counter_with_createdtimestamp",
    help: "A counter with a created timestamp.",
},
{
    m:   "test_counter_with_createdtimestamp",
    typ: MetricTypeCounter,
},
{
    m:  "test_counter_with_createdtimestamp",
    v:  42,
    ct: &types.Timestamp{Seconds: 1, Nanos: 1},
    lset: labels.FromStrings(
        "__name__", "test_counter_with_createdtimestamp",
    ),
},
{
    m:    "test_summary_with_createdtimestamp",
    help: "A summary with a created timestamp.",
},
{
    m:   "test_summary_with_createdtimestamp",
    typ: MetricTypeSummary,
},
{
    m:  "test_summary_with_createdtimestamp_count",
    v:  42,
    ct: &types.Timestamp{Seconds: 1, Nanos: 1},
    lset: labels.FromStrings(
        "__name__", "test_summary_with_createdtimestamp_count",
    ),
},
{
    m:  "test_summary_with_createdtimestamp_sum",
    v:  1.234,
    ct: &types.Timestamp{Seconds: 1, Nanos: 1},
    lset: labels.FromStrings(
        "__name__", "test_summary_with_createdtimestamp_sum",
    ),
},
{
    m:    "test_histogram_with_createdtimestamp",
    help: "A histogram with a created timestamp.",
},
{
    m:   "test_histogram_with_createdtimestamp",
    typ: MetricTypeHistogram,
},
{
    m:  "test_histogram_with_createdtimestamp",
    ct: &types.Timestamp{Seconds: 1, Nanos: 1},
    shs: &histogram.Histogram{
        CounterResetHint: histogram.UnknownCounterReset,
        PositiveSpans:    []histogram.Span{},
        NegativeSpans:    []histogram.Span{},
    },
    lset: labels.FromStrings(
        "__name__", "test_histogram_with_createdtimestamp",
    ),
},
{
    m:    "test_gaugehistogram_with_createdtimestamp",
    help: "A gauge histogram with a created timestamp.",
},
{
    m:   "test_gaugehistogram_with_createdtimestamp",
    typ: MetricTypeGaugeHistogram,
},
{
    m:  "test_gaugehistogram_with_createdtimestamp",
    ct: &types.Timestamp{Seconds: 1, Nanos: 1},
    shs: &histogram.Histogram{
        CounterResetHint: histogram.GaugeType,
        PositiveSpans:    []histogram.Span{},
        NegativeSpans:    []histogram.Span{},
    },
    lset: labels.FromStrings(
        "__name__", "test_gaugehistogram_with_createdtimestamp",
    ),
},
},
},
{
@@ -1739,6 +1884,86 @@ func TestProtobufParse(t *testing.T) {
        "__name__", "empty_histogram",
    ),
},
{ // 81
    m:    "test_counter_with_createdtimestamp",
    help: "A counter with a created timestamp.",
},
{ // 82
    m:   "test_counter_with_createdtimestamp",
    typ: MetricTypeCounter,
},
{ // 83
    m:  "test_counter_with_createdtimestamp",
    v:  42,
    ct: &types.Timestamp{Seconds: 1, Nanos: 1},
    lset: labels.FromStrings(
        "__name__", "test_counter_with_createdtimestamp",
    ),
},
{ // 84
    m:    "test_summary_with_createdtimestamp",
    help: "A summary with a created timestamp.",
},
{ // 85
    m:   "test_summary_with_createdtimestamp",
    typ: MetricTypeSummary,
},
{ // 86
    m:  "test_summary_with_createdtimestamp_count",
    v:  42,
    ct: &types.Timestamp{Seconds: 1, Nanos: 1},
    lset: labels.FromStrings(
        "__name__", "test_summary_with_createdtimestamp_count",
    ),
},
{ // 87
    m:  "test_summary_with_createdtimestamp_sum",
    v:  1.234,
    ct: &types.Timestamp{Seconds: 1, Nanos: 1},
    lset: labels.FromStrings(
        "__name__", "test_summary_with_createdtimestamp_sum",
    ),
},
{ // 88
    m:    "test_histogram_with_createdtimestamp",
    help: "A histogram with a created timestamp.",
},
{ // 89
    m:   "test_histogram_with_createdtimestamp",
    typ: MetricTypeHistogram,
},
{ // 90
    m:  "test_histogram_with_createdtimestamp",
    ct: &types.Timestamp{Seconds: 1, Nanos: 1},
    shs: &histogram.Histogram{
        CounterResetHint: histogram.UnknownCounterReset,
        PositiveSpans:    []histogram.Span{},
        NegativeSpans:    []histogram.Span{},
    },
    lset: labels.FromStrings(
        "__name__", "test_histogram_with_createdtimestamp",
    ),
},
{ // 91
    m:    "test_gaugehistogram_with_createdtimestamp",
    help: "A gauge histogram with a created timestamp.",
},
{ // 92
    m:   "test_gaugehistogram_with_createdtimestamp",
    typ: MetricTypeGaugeHistogram,
},
{ // 93
    m:  "test_gaugehistogram_with_createdtimestamp",
    ct: &types.Timestamp{Seconds: 1, Nanos: 1},
    shs: &histogram.Histogram{
        CounterResetHint: histogram.GaugeType,
        PositiveSpans:    []histogram.Span{},
        NegativeSpans:    []histogram.Span{},
    },
    lset: labels.FromStrings(
        "__name__", "test_gaugehistogram_with_createdtimestamp",
    ),
},
},
},
}
@@ -1764,8 +1989,10 @@ func TestProtobufParse(t *testing.T) {
    m, ts, v := p.Series()

    var e exemplar.Exemplar
    var ct types.Timestamp
    p.Metric(&res)
    found := p.Exemplar(&e)
    eFound := p.Exemplar(&e)
    ctFound := p.CreatedTimestamp(&ct)
    require.Equal(t, exp[i].m, string(m), "i: %d", i)
    if ts != nil {
        require.Equal(t, exp[i].t, *ts, "i: %d", i)
@@ -1775,12 +2002,18 @@ func TestProtobufParse(t *testing.T) {
    require.Equal(t, exp[i].v, v, "i: %d", i)
    require.Equal(t, exp[i].lset, res, "i: %d", i)
    if len(exp[i].e) == 0 {
        require.Equal(t, false, found, "i: %d", i)
        require.Equal(t, false, eFound, "i: %d", i)
    } else {
        require.Equal(t, true, found, "i: %d", i)
        require.Equal(t, true, eFound, "i: %d", i)
        require.Equal(t, exp[i].e[0], e, "i: %d", i)
        require.False(t, p.Exemplar(&e), "too many exemplars returned, i: %d", i)
    }
    if exp[i].ct != nil {
        require.Equal(t, true, ctFound, "i: %d", i)
        require.Equal(t, exp[i].ct.String(), ct.String(), "i: %d", i)
    } else {
        require.Equal(t, false, ctFound, "i: %d", i)
    }

case EntryHistogram:
    m, ts, shs, fhs := p.Histogram()
@@ -172,11 +172,12 @@ func (m *Gauge) GetValue() float64 {
}

type Counter struct {
    Value                float64   `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
    Exemplar             *Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"`
    XXX_NoUnkeyedLiteral struct{}  `json:"-"`
    XXX_unrecognized     []byte    `json:"-"`
    XXX_sizecache        int32     `json:"-"`
    Value                float64          `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
    Exemplar             *Exemplar        `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"`
    CreatedTimestamp     *types.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"`
    XXX_NoUnkeyedLiteral struct{}         `json:"-"`
    XXX_unrecognized     []byte           `json:"-"`
    XXX_sizecache        int32            `json:"-"`
}

func (m *Counter) Reset() { *m = Counter{} }
@@ -226,6 +227,13 @@ func (m *Counter) GetExemplar() *Exemplar {
    return nil
}

func (m *Counter) GetCreatedTimestamp() *types.Timestamp {
    if m != nil {
        return m.CreatedTimestamp
    }
    return nil
}

type Quantile struct {
    Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"`
    Value    float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
@@ -282,12 +290,13 @@ func (m *Quantile) GetValue() float64 {
}

type Summary struct {
    SampleCount          uint64     `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"`
    SampleSum            float64    `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"`
    Quantile             []Quantile `protobuf:"bytes,3,rep,name=quantile,proto3" json:"quantile"`
    XXX_NoUnkeyedLiteral struct{}   `json:"-"`
    XXX_unrecognized     []byte     `json:"-"`
    XXX_sizecache        int32      `json:"-"`
    SampleCount          uint64           `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"`
    SampleSum            float64          `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"`
    Quantile             []Quantile       `protobuf:"bytes,3,rep,name=quantile,proto3" json:"quantile"`
    CreatedTimestamp     *types.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"`
    XXX_NoUnkeyedLiteral struct{}         `json:"-"`
    XXX_unrecognized     []byte           `json:"-"`
    XXX_sizecache        int32            `json:"-"`
}

func (m *Summary) Reset() { *m = Summary{} }
@@ -344,6 +353,13 @@ func (m *Summary) GetQuantile() []Quantile {
    return nil
}

func (m *Summary) GetCreatedTimestamp() *types.Timestamp {
    if m != nil {
        return m.CreatedTimestamp
    }
    return nil
}

type Untyped struct {
    Value                float64  `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
@@ -396,7 +412,8 @@ type Histogram struct {
    SampleCountFloat float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat,proto3" json:"sample_count_float,omitempty"`
    SampleSum        float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"`
    // Buckets for the conventional histogram.
    Bucket []Bucket `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket"`
    Bucket           []Bucket         `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket"`
    CreatedTimestamp *types.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"`
    // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
    // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
    // then each power of two is divided into 2^n logarithmic buckets.
@@ -489,6 +506,13 @@ func (m *Histogram) GetBucket() []Bucket {
    return nil
}

func (m *Histogram) GetCreatedTimestamp() *types.Timestamp {
    if m != nil {
        return m.CreatedTimestamp
    }
    return nil
}

func (m *Histogram) GetSchema() int32 {
    if m != nil {
        return m.Schema
@@ -941,65 +965,67 @@ func init() {
}

var fileDescriptor_d1e5ddb18987a258 = []byte{
    // 923 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x8e, 0xdb, 0x44,
    0x18, 0xad, 0x1b, 0xe7, 0xc7, 0x5f, 0x36, 0xdb, 0x74, 0x88, 0x2a, 0x6b, 0x61, 0x37, 0xc1, 0x12,
    0xd2, 0x82, 0x50, 0x22, 0xa0, 0x08, 0x54, 0x40, 0x62, 0xb7, 0xdd, 0x6e, 0x51, 0x49, 0x5b, 0x26,
    0xc9, 0x45, 0xe1, 0xc2, 0x9a, 0x64, 0x67, 0x1d, 0x0b, 0xdb, 0x63, 0xec, 0x71, 0xc5, 0x72, 0xcf,
    0x25, 0xd7, 0xbc, 0x02, 0x4f, 0x82, 0x7a, 0xc9, 0x13, 0x20, 0xb4, 0xef, 0xc0, 0x3d, 0x9a, 0x3f,
    0x3b, 0x5b, 0x39, 0x85, 0x15, 0x77, 0x33, 0xc7, 0xe7, 0xfb, 0xe6, 0x9c, 0x99, 0xc9, 0x99, 0x80,
    0x17, 0xb2, 0x49, 0x9a, 0xb1, 0x98, 0xf2, 0x35, 0x2d, 0xf2, 0xc9, 0x2a, 0x0a, 0x69, 0xc2, 0x27,
    0x31, 0xe5, 0x59, 0xb8, 0xca, 0xc7, 0x69, 0xc6, 0x38, 0x43, 0x83, 0x90, 0x8d, 0x2b, 0xce, 0x58,
    0x71, 0xf6, 0x06, 0x01, 0x0b, 0x98, 0x24, 0x4c, 0xc4, 0x48, 0x71, 0xf7, 0x86, 0x01, 0x63, 0x41,
    0x44, 0x27, 0x72, 0xb6, 0x2c, 0xce, 0x27, 0x3c, 0x8c, 0x69, 0xce, 0x49, 0x9c, 0x2a, 0x82, 0xf7,
    0x31, 0x38, 0x5f, 0x93, 0x25, 0x8d, 0x9e, 0x91, 0x30, 0x43, 0x08, 0xec, 0x84, 0xc4, 0xd4, 0xb5,
    0x46, 0xd6, 0xa1, 0x83, 0xe5, 0x18, 0x0d, 0xa0, 0xf9, 0x82, 0x44, 0x05, 0x75, 0x6f, 0x4a, 0x50,
    0x4d, 0xbc, 0x7d, 0x68, 0x9e, 0x92, 0x22, 0xd8, 0xf8, 0x2c, 0x6a, 0x2c, 0xf3, 0xf9, 0x3b, 0x68,
    0xdf, 0x67, 0x45, 0xc2, 0x69, 0x56, 0x4f, 0x40, 0xf7, 0xa0, 0x43, 0x7f, 0xa4, 0x71, 0x1a, 0x91,
    0x4c, 0x36, 0xee, 0x7e, 0x78, 0x30, 0xae, 0xb3, 0x35, 0x3e, 0xd1, 0x2c, 0x5c, 0xf2, 0xbd, 0xcf,
    0xa1, 0xf3, 0x4d, 0x41, 0x12, 0x1e, 0x46, 0x14, 0xed, 0x41, 0xe7, 0x07, 0x3d, 0xd6, 0x0b, 0x94,
    0xf3, 0xab, 0xca, 0x4b, 0x69, 0xbf, 0x58, 0xd0, 0x9e, 0x15, 0x71, 0x4c, 0xb2, 0x0b, 0xf4, 0x36,
    0xec, 0xe4, 0x24, 0x4e, 0x23, 0xea, 0xaf, 0x84, 0x5a, 0xd9, 0xc1, 0xc6, 0x5d, 0x85, 0x49, 0x03,
    0x68, 0x1f, 0x40, 0x53, 0xf2, 0x22, 0xd6, 0x9d, 0x1c, 0x85, 0xcc, 0x8a, 0x18, 0x7d, 0xb9, 0xb1,
    0x7e, 0x63, 0xd4, 0xd8, 0xee, 0xc3, 0x28, 0x3e, 0xb6, 0x5f, 0xfe, 0x39, 0xbc, 0x51, 0xa9, 0xf4,
    0x86, 0xd0, 0x5e, 0x24, 0xfc, 0x22, 0xa5, 0x67, 0x5b, 0xf6, 0xf2, 0x6f, 0x1b, 0x9c, 0x47, 0x61,
    0xce, 0x59, 0x90, 0x91, 0xf8, 0xbf, 0x48, 0x7e, 0x1f, 0xd0, 0x26, 0xc5, 0x3f, 0x8f, 0x18, 0xe1,
    0xae, 0x2d, 0x7b, 0xf6, 0x37, 0x88, 0x0f, 0x05, 0xfe, 0x6f, 0x06, 0xef, 0x41, 0x6b, 0x59, 0xac,
    0xbe, 0xa7, 0x5c, 0xdb, 0x7b, 0xab, 0xde, 0xde, 0xb1, 0xe4, 0x68, 0x73, 0xba, 0x02, 0xdd, 0x81,
    0x56, 0xbe, 0x5a, 0xd3, 0x98, 0xb8, 0xcd, 0x91, 0x75, 0x78, 0x1b, 0xeb, 0x19, 0x7a, 0x07, 0x76,
    0x7f, 0xa2, 0x19, 0xf3, 0xf9, 0x3a, 0xa3, 0xf9, 0x9a, 0x45, 0x67, 0x6e, 0x4b, 0x2e, 0xdb, 0x13,
    0xe8, 0xdc, 0x80, 0x42, 0x99, 0xa4, 0x29, 0xa3, 0x6d, 0x69, 0xd4, 0x11, 0x88, 0xb2, 0x79, 0x08,
    0xfd, 0xea, 0xb3, 0x36, 0xd9, 0x91, 0x7d, 0x76, 0x4b, 0x92, 0xb2, 0xf8, 0x18, 0x7a, 0x09, 0x0d,
    0x08, 0x0f, 0x5f, 0x50, 0x3f, 0x4f, 0x49, 0xe2, 0x3a, 0xd2, 0xca, 0xe8, 0x75, 0x56, 0x66, 0x29,
    0x49, 0xb4, 0x9d, 0x1d, 0x53, 0x2c, 0x30, 0x21, 0xbe, 0x6c, 0x76, 0x46, 0x23, 0x4e, 0x5c, 0x18,
    0x35, 0x0e, 0x11, 0x2e, 0x97, 0x78, 0x20, 0xc0, 0x2b, 0x34, 0x65, 0xa0, 0x3b, 0x6a, 0x08, 0x8f,
    0x06, 0x55, 0x26, 0x1e, 0x43, 0x2f, 0x65, 0x79, 0x58, 0x49, 0xdb, 0xb9, 0x9e, 0x34, 0x53, 0x6c,
    0xa4, 0x95, 0xcd, 0x94, 0xb4, 0x9e, 0x92, 0x66, 0xd0, 0x52, 0x5a, 0x49, 0x53, 0xd2, 0x76, 0x95,
    0x34, 0x83, 0x4a, 0x69, 0xde, 0xef, 0x16, 0xb4, 0xd4, 0x82, 0xe8, 0x5d, 0xe8, 0xaf, 0x8a, 0xb8,
    0x88, 0x36, 0xed, 0xa8, 0x8b, 0x77, 0xab, 0xc2, 0x95, 0xa1, 0xbb, 0x70, 0xe7, 0x55, 0xea, 0x95,
    0x0b, 0x38, 0x78, 0xa5, 0x40, 0x9d, 0xd0, 0x10, 0xba, 0x45, 0x9a, 0xd2, 0xcc, 0x5f, 0xb2, 0x22,
    0x39, 0xd3, 0xb7, 0x10, 0x24, 0x74, 0x2c, 0x90, 0x2b, 0x79, 0xd1, 0xb8, 0x76, 0x5e, 0x40, 0xb5,
    0x71, 0xe2, 0x52, 0xb2, 0xf3, 0xf3, 0x9c, 0x2a, 0x07, 0xb7, 0xb1, 0x9e, 0x09, 0x3c, 0xa2, 0x49,
    0xc0, 0xd7, 0x72, 0xf5, 0x1e, 0xd6, 0x33, 0xef, 0x57, 0x0b, 0x3a, 0xa6, 0x29, 0xfa, 0x0c, 0x9a,
    0x91, 0x48, 0x4b, 0xd7, 0x92, 0xc7, 0x34, 0xac, 0xd7, 0x50, 0x06, 0xaa, 0x3e, 0x25, 0x55, 0x53,
    0x9f, 0x47, 0xe8, 0x53, 0x70, 0xca, 0x4c, 0xd6, 0xd6, 0xf6, 0xc6, 0x2a, 0xb5, 0xc7, 0x26, 0xb5,
    0xc7, 0x73, 0xc3, 0xc0, 0x15, 0xd9, 0xfb, 0xb9, 0x01, 0xad, 0xa9, 0x7c, 0x19, 0xfe, 0x9f, 0xae,
    0x0f, 0xa0, 0x19, 0x88, 0x2c, 0xd7, 0x41, 0xfc, 0x66, 0x7d, 0xb1, 0x8c, 0x7b, 0xac, 0x98, 0xe8,
    0x13, 0x68, 0xaf, 0x54, 0xbe, 0x6b, 0xc9, 0xfb, 0xf5, 0x45, 0xfa, 0x11, 0xc0, 0x86, 0x2d, 0x0a,
    0x73, 0x15, 0xbe, 0xf2, 0x3e, 0x6c, 0x2d, 0xd4, 0x09, 0x8d, 0x0d, 0x5b, 0x14, 0x16, 0x2a, 0x26,
    0x65, 0x98, 0x6c, 0x2d, 0xd4, 0x59, 0x8a, 0x0d, 0x1b, 0x7d, 0x01, 0xce, 0xda, 0xa4, 0xa7, 0x0c,
    0x91, 0xad, 0xdb, 0x53, 0x86, 0x2c, 0xae, 0x2a, 0x44, 0xde, 0x96, 0x3b, 0xee, 0xc7, 0xb9, 0x4c,
    0xaa, 0x06, 0xee, 0x96, 0xd8, 0x34, 0xf7, 0x7e, 0xb3, 0x60, 0x47, 0x9d, 0xc3, 0x43, 0x12, 0x87,
    0xd1, 0x45, 0xed, 0x33, 0x8a, 0xc0, 0x5e, 0xd3, 0x28, 0xd5, 0xaf, 0xa8, 0x1c, 0xa3, 0xbb, 0x60,
    0x0b, 0x8d, 0x72, 0x0b, 0x77, 0xb7, 0xfd, 0xe6, 0x55, 0xe7, 0xf9, 0x45, 0x4a, 0xb1, 0x64, 0x8b,
    0x44, 0x56, 0xff, 0x07, 0x5c, 0xfb, 0x75, 0x89, 0xac, 0xea, 0x4c, 0x22, 0xab, 0x8a, 0xf7, 0x96,
    0x00, 0x55, 0x3f, 0xd4, 0x85, 0xf6, 0xfd, 0xa7, 0x8b, 0x27, 0xf3, 0x13, 0xdc, 0xbf, 0x81, 0x1c,
    0x68, 0x9e, 0x1e, 0x2d, 0x4e, 0x4f, 0xfa, 0x96, 0xc0, 0x67, 0x8b, 0xe9, 0xf4, 0x08, 0x3f, 0xef,
    0xdf, 0x14, 0x93, 0xc5, 0x93, 0xf9, 0xf3, 0x67, 0x27, 0x0f, 0xfa, 0x0d, 0xd4, 0x03, 0xe7, 0xd1,
    0x57, 0xb3, 0xf9, 0xd3, 0x53, 0x7c, 0x34, 0xed, 0xdb, 0xe8, 0x0d, 0xb8, 0x25, 0x6b, 0xfc, 0x0a,
    0x6c, 0x1e, 0x7b, 0x2f, 0x2f, 0x0f, 0xac, 0x3f, 0x2e, 0x0f, 0xac, 0xbf, 0x2e, 0x0f, 0xac, 0x6f,
    0x07, 0x21, 0xf3, 0x2b, 0x71, 0xbe, 0x12, 0xb7, 0x6c, 0xc9, 0x9b, 0xfd, 0xd1, 0x3f, 0x01, 0x00,
    0x00, 0xff, 0xff, 0x52, 0x2d, 0xb5, 0x31, 0xef, 0x08, 0x00, 0x00,
    // 960 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x6e, 0x1b, 0x45,
    0x14, 0xee, 0xd6, 0xbf, 0x7b, 0x1c, 0x27, 0x9b, 0xc1, 0xaa, 0x56, 0x81, 0xc4, 0x66, 0x25, 0xa4,
    0x80, 0x90, 0x2d, 0xa0, 0x08, 0x54, 0x8a, 0x44, 0xd2, 0xa6, 0x2e, 0x2a, 0x6e, 0xcb, 0xd8, 0xbe,
    0x28, 0x37, 0xab, 0xb1, 0x3d, 0x59, 0xaf, 0xd8, 0xdd, 0x59, 0xf6, 0xa7, 0x22, 0xdc, 0xf3, 0x0c,
    0xbc, 0x00, 0x17, 0x3c, 0x05, 0x97, 0xa8, 0x97, 0x5c, 0x71, 0x89, 0x50, 0x9e, 0x04, 0xcd, 0xdf,
    0xae, 0x53, 0xad, 0x03, 0x81, 0xbb, 0x99, 0xcf, 0xdf, 0x39, 0xf3, 0x9d, 0x6f, 0xc6, 0xe7, 0x2c,
    0x38, 0x3e, 0x1b, 0xc5, 0x09, 0x0b, 0x69, 0xb6, 0xa6, 0x79, 0x3a, 0x5a, 0x06, 0x3e, 0x8d, 0xb2,
    0x51, 0x48, 0xb3, 0xc4, 0x5f, 0xa6, 0xc3, 0x38, 0x61, 0x19, 0x43, 0x3d, 0x9f, 0x0d, 0x4b, 0xce,
    0x50, 0x72, 0x0e, 0x7a, 0x1e, 0xf3, 0x98, 0x24, 0x8c, 0xf8, 0x4a, 0x72, 0x0f, 0xfa, 0x1e, 0x63,
    0x5e, 0x40, 0x47, 0x62, 0xb7, 0xc8, 0xcf, 0x47, 0x99, 0x1f, 0xd2, 0x34, 0x23, 0x61, 0x2c, 0x09,
    0xce, 0xc7, 0x60, 0x7e, 0x45, 0x16, 0x34, 0x78, 0x4e, 0xfc, 0x04, 0x21, 0xa8, 0x47, 0x24, 0xa4,
    0xb6, 0x31, 0x30, 0x8e, 0x4d, 0x2c, 0xd6, 0xa8, 0x07, 0x8d, 0x97, 0x24, 0xc8, 0xa9, 0x7d, 0x5b,
    0x80, 0x72, 0xe3, 0x1c, 0x42, 0x63, 0x4c, 0x72, 0x6f, 0xe3, 0x67, 0x1e, 0x63, 0xe8, 0x9f, 0x7f,
    0x36, 0xa0, 0xf5, 0x80, 0xe5, 0x51, 0x46, 0x93, 0x6a, 0x06, 0xba, 0x07, 0x6d, 0xfa, 0x3d, 0x0d,
    0xe3, 0x80, 0x24, 0x22, 0x73, 0xe7, 0xc3, 0xa3, 0x61, 0x55, 0x5d, 0xc3, 0x33, 0xc5, 0xc2, 0x05,
    0x1f, 0x8d, 0x61, 0x7f, 0x99, 0x50, 0x92, 0xd1, 0x95, 0x5b, 0x94, 0x63, 0xd7, 0x44, 0x92, 0x83,
    0xa1, 0x2c, 0x78, 0xa8, 0x0b, 0x1e, 0xce, 0x34, 0x03, 0x5b, 0x2a, 0xa8, 0x40, 0x9c, 0xfb, 0xd0,
    0xfe, 0x3a, 0x27, 0x51, 0xe6, 0x07, 0x14, 0x1d, 0x40, 0xfb, 0x3b, 0xb5, 0x56, 0x4a, 0x8b, 0xfd,
    0x55, 0x0f, 0x8a, 0x22, 0xff, 0x30, 0xa0, 0x35, 0xcd, 0xc3, 0x90, 0x24, 0x17, 0xe8, 0x6d, 0xd8,
    0x49, 0x49, 0x18, 0x07, 0xd4, 0x5d, 0xf2, 0xb2, 0x45, 0x86, 0x3a, 0xee, 0x48, 0x4c, 0x38, 0x81,
    0x0e, 0x01, 0x14, 0x25, 0xcd, 0x43, 0x95, 0xc9, 0x94, 0xc8, 0x34, 0x0f, 0xd1, 0x17, 0x1b, 0xe7,
    0xd7, 0x06, 0xb5, 0xed, 0x86, 0x68, 0xc5, 0xa7, 0xf5, 0x57, 0x7f, 0xf6, 0x6f, 0x6d, 0xa8, 0xac,
    0xb4, 0xa5, 0xfe, 0x1f, 0x6c, 0xe9, 0x43, 0x6b, 0x1e, 0x65, 0x17, 0x31, 0x5d, 0x6d, 0xb9, 0xde,
    0x5f, 0x1b, 0x60, 0x3e, 0xf6, 0xd3, 0x8c, 0x79, 0x09, 0x09, 0xff, 0x4d, 0xed, 0xef, 0x03, 0xda,
    0xa4, 0xb8, 0xe7, 0x01, 0x23, 0x99, 0xd0, 0x66, 0x60, 0x6b, 0x83, 0xf8, 0x88, 0xe3, 0xff, 0xe4,
    0xd4, 0x3d, 0x68, 0x2e, 0xf2, 0xe5, 0xb7, 0x34, 0x53, 0x3e, 0xbd, 0x55, 0xed, 0xd3, 0xa9, 0xe0,
    0x28, 0x97, 0x54, 0x44, 0xb5, 0x47, 0x7b, 0x37, 0xf7, 0x08, 0xdd, 0x81, 0x66, 0xba, 0x5c, 0xd3,
    0x90, 0xd8, 0x8d, 0x81, 0x71, 0xbc, 0x8f, 0xd5, 0x0e, 0xbd, 0x03, 0xbb, 0x3f, 0xd0, 0x84, 0xb9,
    0xd9, 0x3a, 0xa1, 0xe9, 0x9a, 0x05, 0x2b, 0xbb, 0x29, 0xf4, 0x77, 0x39, 0x3a, 0xd3, 0x20, 0x2f,
    0x51, 0xd0, 0xa4, 0x63, 0x2d, 0xe1, 0x98, 0xc9, 0x11, 0xe9, 0xd7, 0x31, 0x58, 0xe5, 0xcf, 0xca,
    0xad, 0xb6, 0xc8, 0xb3, 0x5b, 0x90, 0xa4, 0x57, 0x4f, 0xa0, 0x1b, 0x51, 0x8f, 0x64, 0xfe, 0x4b,
    0xea, 0xa6, 0x31, 0x89, 0x6c, 0x53, 0x78, 0x32, 0xb8, 0xce, 0x93, 0x69, 0x4c, 0x22, 0xe5, 0xcb,
    0x8e, 0x0e, 0xe6, 0x18, 0x17, 0x5f, 0x24, 0x5b, 0xd1, 0x20, 0x23, 0x36, 0x0c, 0x6a, 0xc7, 0x08,
    0x17, 0x47, 0x3c, 0xe4, 0xe0, 0x15, 0x9a, 0x2c, 0xa0, 0x33, 0xa8, 0xf1, 0x1a, 0x35, 0x2a, 0x8b,
    0x78, 0x02, 0xdd, 0x98, 0xa5, 0x7e, 0x29, 0x6d, 0xe7, 0x66, 0xd2, 0x74, 0xb0, 0x96, 0x56, 0x24,
    0x93, 0xd2, 0xba, 0x52, 0x9a, 0x46, 0x0b, 0x69, 0x05, 0x4d, 0x4a, 0xdb, 0x95, 0xd2, 0x34, 0x2a,
    0xa4, 0x39, 0xbf, 0x19, 0xd0, 0x94, 0x07, 0xa2, 0x77, 0xc1, 0x5a, 0xe6, 0x61, 0x1e, 0x6c, 0x96,
    0x23, 0x5f, 0xf0, 0x5e, 0x89, 0xcb, 0x82, 0xee, 0xc2, 0x9d, 0xd7, 0xa9, 0x57, 0x5e, 0x72, 0xef,
    0xb5, 0x00, 0x79, 0x43, 0x7d, 0xe8, 0xe4, 0x71, 0x4c, 0x13, 0x77, 0xc1, 0xf2, 0x68, 0xa5, 0x9e,
    0x33, 0x08, 0xe8, 0x94, 0x23, 0x57, 0x5a, 0x61, 0xed, 0x66, 0xad, 0xd0, 0xb9, 0x0f, 0x50, 0x1a,
    0xc7, 0x1f, 0x25, 0x3b, 0x3f, 0x4f, 0xa9, 0xac, 0x60, 0x1f, 0xab, 0x1d, 0xc7, 0x03, 0x1a, 0x79,
    0xd9, 0x5a, 0x9c, 0xde, 0xc5, 0x6a, 0xe7, 0xfc, 0x64, 0x40, 0x5b, 0x27, 0x45, 0x9f, 0x41, 0x23,
    0xe0, 0x93, 0xc0, 0x36, 0xc4, 0x35, 0xf5, 0xab, 0x35, 0x14, 0xc3, 0x42, 0xdd, 0x92, 0x8c, 0xa9,
    0xee, 0x90, 0xe8, 0x53, 0x30, 0x6f, 0xd2, 0xa0, 0x4b, 0xb2, 0xf3, 0x63, 0x0d, 0x9a, 0x13, 0x31,
    0xf5, 0xfe, 0x9f, 0xae, 0x0f, 0xa0, 0xe1, 0xf1, 0x39, 0xa5, 0x66, 0xcc, 0x9b, 0xd5, 0xc1, 0x62,
    0x94, 0x61, 0xc9, 0x44, 0x9f, 0x40, 0x6b, 0x29, 0x47, 0x97, 0x92, 0x7c, 0x58, 0x1d, 0xa4, 0xe6,
    0x1b, 0xd6, 0x6c, 0x1e, 0x98, 0xca, 0x71, 0xa0, 0xba, 0xee, 0x96, 0x40, 0x35, 0x33, 0xb0, 0x66,
    0xf3, 0xc0, 0x5c, 0xf6, 0x5b, 0xd1, 0x4c, 0xb6, 0x06, 0xaa, 0xa6, 0x8c, 0x35, 0x1b, 0x7d, 0x0e,
    0xe6, 0x5a, 0xb7, 0x61, 0xd1, 0x44, 0xb6, 0xda, 0x53, 0x74, 0x6b, 0x5c, 0x46, 0xf0, 0xc6, 0x5d,
    0x38, 0xee, 0x86, 0xa9, 0xe8, 0x54, 0x35, 0xdc, 0x29, 0xb0, 0x49, 0xea, 0xfc, 0x62, 0xc0, 0x8e,
    0xbc, 0x87, 0x47, 0x24, 0xf4, 0x83, 0x8b, 0xca, 0x4f, 0x04, 0x04, 0xf5, 0x35, 0x0d, 0x62, 0xf5,
    0x85, 0x20, 0xd6, 0xe8, 0x2e, 0xd4, 0xb9, 0x46, 0x61, 0xe1, 0xee, 0xb6, 0xff, 0xbc, 0xcc, 0x3c,
    0xbb, 0x88, 0x29, 0x16, 0x6c, 0xde, 0xda, 0xe5, 0xb7, 0x8e, 0x5d, 0xbf, 0xae, 0xb5, 0xcb, 0x38,
    0xdd, 0xda, 0x65, 0xc4, 0x7b, 0x0b, 0x80, 0x32, 0x1f, 0xea, 0x40, 0xeb, 0xc1, 0xb3, 0xf9, 0xd3,
    0xd9, 0x19, 0xb6, 0x6e, 0x21, 0x13, 0x1a, 0xe3, 0x93, 0xf9, 0xf8, 0xcc, 0x32, 0x38, 0x3e, 0x9d,
    0x4f, 0x26, 0x27, 0xf8, 0x85, 0x75, 0x9b, 0x6f, 0xe6, 0x4f, 0x67, 0x2f, 0x9e, 0x9f, 0x3d, 0xb4,
    0x6a, 0xa8, 0x0b, 0xe6, 0xe3, 0x2f, 0xa7, 0xb3, 0x67, 0x63, 0x7c, 0x32, 0xb1, 0xea, 0xe8, 0x0d,
    0xd8, 0x13, 0x31, 0x6e, 0x09, 0x36, 0x4e, 0x9d, 0x57, 0x97, 0x47, 0xc6, 0xef, 0x97, 0x47, 0xc6,
    0x5f, 0x97, 0x47, 0xc6, 0x37, 0x3d, 0x9f, 0xb9, 0xa5, 0x38, 0x57, 0x8a, 0x5b, 0x34, 0xc5, 0xcb,
    0xfe, 0xe8, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x2e, 0x66, 0xc1, 0xcb, 0x09, 0x00, 0x00,
}

func (m *LabelPair) Marshal() (dAtA []byte, err error) {
@@ -1100,6 +1126,18 @@ func (m *Counter) MarshalToSizedBuffer(dAtA []byte) (int, error) {
        i -= len(m.XXX_unrecognized)
        copy(dAtA[i:], m.XXX_unrecognized)
    }
    if m.CreatedTimestamp != nil {
        {
            size, err := m.CreatedTimestamp.MarshalToSizedBuffer(dAtA[:i])
            if err != nil {
                return 0, err
            }
            i -= size
            i = encodeVarintMetrics(dAtA, i, uint64(size))
        }
        i--
        dAtA[i] = 0x1a
    }
    if m.Exemplar != nil {
        {
            size, err := m.Exemplar.MarshalToSizedBuffer(dAtA[:i])
@@ -1184,6 +1222,18 @@ func (m *Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) {
        i -= len(m.XXX_unrecognized)
        copy(dAtA[i:], m.XXX_unrecognized)
    }
    if m.CreatedTimestamp != nil {
        {
            size, err := m.CreatedTimestamp.MarshalToSizedBuffer(dAtA[:i])
            if err != nil {
                return 0, err
            }
            i -= size
            i = encodeVarintMetrics(dAtA, i, uint64(size))
        }
        i--
        dAtA[i] = 0x22
    }
    if len(m.Quantile) > 0 {
        for iNdEx := len(m.Quantile) - 1; iNdEx >= 0; iNdEx-- {
            {
@@ -1269,32 +1319,44 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
        i -= len(m.XXX_unrecognized)
        copy(dAtA[i:], m.XXX_unrecognized)
    }
    if m.CreatedTimestamp != nil {
        {
            size, err := m.CreatedTimestamp.MarshalToSizedBuffer(dAtA[:i])
            if err != nil {
                return 0, err
            }
            i -= size
            i = encodeVarintMetrics(dAtA, i, uint64(size))
        }
        i--
        dAtA[i] = 0x7a
    }
    if len(m.PositiveCount) > 0 {
        for iNdEx := len(m.PositiveCount) - 1; iNdEx >= 0; iNdEx-- {
            f2 := math.Float64bits(float64(m.PositiveCount[iNdEx]))
            f5 := math.Float64bits(float64(m.PositiveCount[iNdEx]))
            i -= 8
            encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f2))
            encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f5))
        }
        i = encodeVarintMetrics(dAtA, i, uint64(len(m.PositiveCount)*8))
        i--
        dAtA[i] = 0x72
    }
    if len(m.PositiveDelta) > 0 {
        var j3 int
        dAtA5 := make([]byte, len(m.PositiveDelta)*10)
        var j6 int
        dAtA8 := make([]byte, len(m.PositiveDelta)*10)
        for _, num := range m.PositiveDelta {
            x4 := (uint64(num) << 1) ^ uint64((num >> 63))
            for x4 >= 1<<7 {
                dAtA5[j3] = uint8(uint64(x4)&0x7f | 0x80)
                j3++
                x4 >>= 7
            x7 := (uint64(num) << 1) ^ uint64((num >> 63))
            for x7 >= 1<<7 {
                dAtA8[j6] = uint8(uint64(x7)&0x7f | 0x80)
                j6++
                x7 >>= 7
            }
            dAtA5[j3] = uint8(x4)
            j3++
            dAtA8[j6] = uint8(x7)
            j6++
        }
        i -= j3
        copy(dAtA[i:], dAtA5[:j3])
        i = encodeVarintMetrics(dAtA, i, uint64(j3))
        i -= j6
        copy(dAtA[i:], dAtA8[:j6])
        i = encodeVarintMetrics(dAtA, i, uint64(j6))
        i--
        dAtA[i] = 0x6a
    }
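The (uint64(num) << 1) ^ uint64(num >> 63) expression in these generated loops is protobuf's zig-zag transform for signed varint fields: it folds the sign into the low bit so values of small magnitude, positive or negative, encode to short varints. Factored out as a sketch:

package main

import "fmt"

// zigzag maps 0, -1, 1, -2, 2, ... to 0, 1, 2, 3, 4, ..., exactly as the
// inlined expression in the generated marshalling code does.
func zigzag(num int64) uint64 {
    return (uint64(num) << 1) ^ uint64(num>>63)
}

func main() {
    for _, n := range []int64{0, -1, 1, -2, 2, -64, 63} {
        fmt.Printf("%3d -> %d\n", n, zigzag(n))
    }
}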
@@ -1314,30 +1376,30 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    }
    if len(m.NegativeCount) > 0 {
        for iNdEx := len(m.NegativeCount) - 1; iNdEx >= 0; iNdEx-- {
            f6 := math.Float64bits(float64(m.NegativeCount[iNdEx]))
            f9 := math.Float64bits(float64(m.NegativeCount[iNdEx]))
            i -= 8
            encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6))
            encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f9))
        }
        i = encodeVarintMetrics(dAtA, i, uint64(len(m.NegativeCount)*8))
        i--
        dAtA[i] = 0x5a
    }
    if len(m.NegativeDelta) > 0 {
        var j7 int
        dAtA9 := make([]byte, len(m.NegativeDelta)*10)
        var j10 int
        dAtA12 := make([]byte, len(m.NegativeDelta)*10)
        for _, num := range m.NegativeDelta {
            x8 := (uint64(num) << 1) ^ uint64((num >> 63))
            for x8 >= 1<<7 {
                dAtA9[j7] = uint8(uint64(x8)&0x7f | 0x80)
                j7++
                x8 >>= 7
            x11 := (uint64(num) << 1) ^ uint64((num >> 63))
            for x11 >= 1<<7 {
                dAtA12[j10] = uint8(uint64(x11)&0x7f | 0x80)
                j10++
                x11 >>= 7
            }
            dAtA9[j7] = uint8(x8)
            j7++
            dAtA12[j10] = uint8(x11)
            j10++
        }
        i -= j7
        copy(dAtA[i:], dAtA9[:j7])
        i = encodeVarintMetrics(dAtA, i, uint64(j7))
        i -= j10
        copy(dAtA[i:], dAtA12[:j10])
        i = encodeVarintMetrics(dAtA, i, uint64(j10))
        i--
        dAtA[i] = 0x52
    }
@@ -1788,6 +1850,10 @@ func (m *Counter) Size() (n int) {
        l = m.Exemplar.Size()
        n += 1 + l + sovMetrics(uint64(l))
    }
    if m.CreatedTimestamp != nil {
        l = m.CreatedTimestamp.Size()
        n += 1 + l + sovMetrics(uint64(l))
    }
    if m.XXX_unrecognized != nil {
        n += len(m.XXX_unrecognized)
    }
@@ -1830,6 +1896,10 @@ func (m *Summary) Size() (n int) {
            n += 1 + l + sovMetrics(uint64(l))
        }
    }
    if m.CreatedTimestamp != nil {
        l = m.CreatedTimestamp.Size()
        n += 1 + l + sovMetrics(uint64(l))
    }
    if m.XXX_unrecognized != nil {
        n += len(m.XXX_unrecognized)
    }
@@ -1916,6 +1986,10 @@ func (m *Histogram) Size() (n int) {
    if len(m.PositiveCount) > 0 {
        n += 1 + sovMetrics(uint64(len(m.PositiveCount)*8)) + len(m.PositiveCount)*8
    }
    if m.CreatedTimestamp != nil {
        l = m.CreatedTimestamp.Size()
        n += 1 + l + sovMetrics(uint64(l))
    }
    if m.XXX_unrecognized != nil {
        n += len(m.XXX_unrecognized)
    }
@@ -2319,6 +2393,42 @@ func (m *Counter) Unmarshal(dAtA []byte) error {
                return err
            }
            iNdEx = postIndex
        case 3:
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType)
            }
            var msglen int
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowMetrics
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                msglen |= int(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            if msglen < 0 {
                return ErrInvalidLengthMetrics
            }
            postIndex := iNdEx + msglen
            if postIndex < 0 {
                return ErrInvalidLengthMetrics
            }
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            if m.CreatedTimestamp == nil {
                m.CreatedTimestamp = &types.Timestamp{}
            }
            if err := m.CreatedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
                return err
            }
            iNdEx = postIndex
        default:
            iNdEx = preIndex
            skippy, err := skipMetrics(dAtA[iNdEx:])
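Each CreatedTimestamp decode path begins with the same inlined loop that reads a base-128 varint length prefix. Factored into a standalone helper for clarity (a sketch, not part of the generated code):

package varint

import (
    "errors"
    "io"
)

// readUvarint decodes a base-128 varint from data, returning the value and
// the number of bytes consumed. Each byte contributes 7 payload bits and the
// high bit marks continuation, mirroring the inlined loop above.
func readUvarint(data []byte) (v uint64, n int, err error) {
    for shift := uint(0); shift < 64; shift += 7 {
        if n >= len(data) {
            return 0, 0, io.ErrUnexpectedEOF
        }
        b := data[n]
        n++
        v |= uint64(b&0x7F) << shift
        if b < 0x80 {
            return v, n, nil
        }
    }
    return 0, 0, errors.New("varint overflows a 64-bit integer")
}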
@@ -2507,6 +2617,42 @@ func (m *Summary) Unmarshal(dAtA []byte) error {
                return err
            }
            iNdEx = postIndex
        case 4:
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType)
            }
            var msglen int
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowMetrics
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                msglen |= int(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            if msglen < 0 {
                return ErrInvalidLengthMetrics
            }
            postIndex := iNdEx + msglen
            if postIndex < 0 {
                return ErrInvalidLengthMetrics
            }
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            if m.CreatedTimestamp == nil {
                m.CreatedTimestamp = &types.Timestamp{}
            }
            if err := m.CreatedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
                return err
            }
            iNdEx = postIndex
        default:
            iNdEx = preIndex
            skippy, err := skipMetrics(dAtA[iNdEx:])
@@ -3089,6 +3235,42 @@ func (m *Histogram) Unmarshal(dAtA []byte) error {
            } else {
                return fmt.Errorf("proto: wrong wireType = %d for field PositiveCount", wireType)
            }
        case 15:
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType)
            }
            var msglen int
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowMetrics
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                msglen |= int(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            if msglen < 0 {
                return ErrInvalidLengthMetrics
            }
            postIndex := iNdEx + msglen
            if postIndex < 0 {
                return ErrInvalidLengthMetrics
            }
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            if m.CreatedTimestamp == nil {
                m.CreatedTimestamp = &types.Timestamp{}
            }
            if err := m.CreatedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
                return err
            }
            iNdEx = postIndex
        default:
            iNdEx = preIndex
            skippy, err := skipMetrics(dAtA[iNdEx:])
@@ -51,6 +51,8 @@ message Gauge {
message Counter {
  double value      = 1;
  Exemplar exemplar = 2;

  google.protobuf.Timestamp created_timestamp = 3;
}

message Quantile {
@@ -62,6 +64,8 @@ message Summary {
  uint64 sample_count        = 1;
  double sample_sum          = 2;
  repeated Quantile quantile = 3 [(gogoproto.nullable) = false];

  google.protobuf.Timestamp created_timestamp = 4;
}

message Untyped {
@@ -75,6 +79,8 @@ message Histogram {
  // Buckets for the conventional histogram.
  repeated Bucket bucket = 3 [(gogoproto.nullable) = false]; // Ordered in increasing order of upper_bound, +Inf bucket is optional.

  google.protobuf.Timestamp created_timestamp = 15;

  // Everything below here is for native histograms (also known as sparse histograms).
  // Native histograms are an experimental feature without stability guarantees.
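With the new fields in place, an exposer can attach a created timestamp when building a metric. A sketch using the gogo types exactly as the generated code above declares them; the values are illustrative:

package main

import (
    "fmt"

    "github.com/gogo/protobuf/types"

    dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
)

func main() {
    // Field 3 on Counter, field 4 on Summary, field 15 on Histogram,
    // matching the proto definitions above.
    m := &dto.Metric{
        Counter: &dto.Counter{
            Value:            42,
            CreatedTimestamp: &types.Timestamp{Seconds: 1, Nanos: 1},
        },
    }
    fmt.Println(m.GetCounter().GetCreatedTimestamp().String())
}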
@@ -1224,10 +1224,11 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
        enh.Out = result[:0] // Reuse result vector.
        warnings.Merge(ws)

        ev.currentSamples += len(result)
        vecNumSamples := result.TotalSamples()
        ev.currentSamples += vecNumSamples
        // When we reset currentSamples to tempNumSamples during the next iteration of the loop it also
        // needs to include the samples from the result here, as they're still in memory.
        tempNumSamples += len(result)
        tempNumSamples += vecNumSamples
        ev.samplesStats.UpdatePeak(ev.currentSamples)

        if ev.currentSamples > ev.maxSamples {
@@ -1323,12 +1324,10 @@ func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSele
        Range:          subq.Range,
        VectorSelector: vs,
    }
    totalSamples := 0
    for _, s := range mat {
        totalSamples += len(s.Floats) + len(s.Histograms)
        vs.Series = append(vs.Series, NewStorageSeries(s))
    }
    return ms, totalSamples, ws
    return ms, mat.TotalSamples(), ws
}

// eval evaluates the given expression as the given AST expression node requires.
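The hunks above lean on two accounting helpers whose definitions are outside this excerpt. A sketch consistent with how they are used here, where a native-histogram point is charged by its memory footprint instead of counting as a single sample (assumed shapes, not verbatim source):

// totalHPointSize sums the equivalent-sample weight of all histogram points.
func totalHPointSize(histograms []HPoint) int {
    var total int
    for _, h := range histograms {
        total += h.size()
    }
    return total
}

// TotalSamples counts floats as one sample each and histograms by size.
func (m Matrix) TotalSamples() int {
    numSamples := 0
    for _, series := range m {
        numSamples += len(series.Floats) + totalHPointSize(series.Histograms)
    }
    return numSamples
}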
@@ -1470,7 +1469,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
    it := storage.NewBuffer(selRange)
    var chkIter chunkenc.Iterator
    for i, s := range selVS.Series {
        ev.currentSamples -= len(floats) + len(histograms)
        ev.currentSamples -= len(floats) + totalHPointSize(histograms)
        if floats != nil {
            floats = floats[:0]
        }
@@ -1514,7 +1513,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
        // Make the function call.
        outVec, annos := call(inArgs, e.Args, enh)
        warnings.Merge(annos)
        ev.samplesStats.IncrementSamplesAtStep(step, int64(len(floats)+len(histograms)))
        ev.samplesStats.IncrementSamplesAtStep(step, int64(len(floats)+totalHPointSize(histograms)))

        enh.Out = outVec[:0]
        if len(outVec) > 0 {
@@ -1533,10 +1532,11 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
        // Only buffer stepRange milliseconds from the second step on.
        it.ReduceDelta(stepRange)
    }
    if len(ss.Floats)+len(ss.Histograms) > 0 {
        if ev.currentSamples+len(ss.Floats)+len(ss.Histograms) <= ev.maxSamples {
    histSamples := totalHPointSize(ss.Histograms)
    if len(ss.Floats)+histSamples > 0 {
        if ev.currentSamples+len(ss.Floats)+histSamples <= ev.maxSamples {
            mat = append(mat, ss)
            ev.currentSamples += len(ss.Floats) + len(ss.Histograms)
            ev.currentSamples += len(ss.Floats) + histSamples
        } else {
            ev.error(ErrTooManySamples(env))
        }
@@ -1545,7 +1545,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
    }
    ev.samplesStats.UpdatePeak(ev.currentSamples)

    ev.currentSamples -= len(floats) + len(histograms)
    ev.currentSamples -= len(floats) + totalHPointSize(histograms)
    putFPointSlice(floats)
    putHPointSlice(histograms)
@ -1692,14 +1692,18 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
|
|||
ss.Floats = getFPointSlice(numSteps)
|
||||
}
|
||||
ss.Floats = append(ss.Floats, FPoint{F: f, T: ts})
|
||||
ev.currentSamples++
|
||||
ev.samplesStats.IncrementSamplesAtStep(step, 1)
|
||||
} else {
|
||||
if ss.Histograms == nil {
|
||||
ss.Histograms = getHPointSlice(numSteps)
|
||||
}
|
||||
ss.Histograms = append(ss.Histograms, HPoint{H: h, T: ts})
|
||||
point := HPoint{H: h, T: ts}
|
||||
ss.Histograms = append(ss.Histograms, point)
|
||||
histSize := point.size()
|
||||
ev.currentSamples += histSize
|
||||
ev.samplesStats.IncrementSamplesAtStep(step, int64(histSize))
|
||||
}
|
||||
ev.samplesStats.IncrementSamplesAtStep(step, 1)
|
||||
ev.currentSamples++
|
||||
} else {
|
||||
ev.error(ErrTooManySamples(env))
|
||||
}
|
||||
|
@ -1807,13 +1811,15 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
|
|||
T: ts,
|
||||
F: mat[i].Floats[0].F,
|
||||
})
|
||||
ev.currentSamples++
|
||||
} else {
|
||||
mat[i].Histograms = append(mat[i].Histograms, HPoint{
|
||||
point := HPoint{
|
||||
T: ts,
|
||||
H: mat[i].Histograms[0].H,
|
||||
})
|
||||
}
|
||||
mat[i].Histograms = append(mat[i].Histograms, point)
|
||||
ev.currentSamples += point.size()
|
||||
}
|
||||
ev.currentSamples++
|
||||
if ev.currentSamples > ev.maxSamples {
|
||||
ev.error(ErrTooManySamples(env))
|
||||
}
|
||||
|
@ -1857,9 +1863,14 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec
F: f,
H: h,
})

histSize := 0
if h != nil {
histSize = h.Size() / 16 // 16 bytes per sample.
ev.currentSamples += histSize
}
ev.currentSamples++
ev.samplesStats.IncrementSamplesAtTimestamp(enh.Ts, 1)

ev.samplesStats.IncrementSamplesAtTimestamp(enh.Ts, int64(1+histSize))
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env))
}

@ -1981,10 +1992,10 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annota
}

ss.Floats, ss.Histograms = ev.matrixIterSlice(it, mint, maxt, nil, nil)
totalLen := int64(len(ss.Floats)) + int64(len(ss.Histograms))
ev.samplesStats.IncrementSamplesAtTimestamp(ev.startTimestamp, totalLen)
totalSize := int64(len(ss.Floats)) + int64(totalHPointSize(ss.Histograms))
ev.samplesStats.IncrementSamplesAtTimestamp(ev.startTimestamp, totalSize)

if totalLen > 0 {
if totalSize > 0 {
matrix = append(matrix, ss)
} else {
putFPointSlice(ss.Floats)

@ -2040,13 +2051,13 @@ func (ev *evaluator) matrixIterSlice(
var drop int
for drop = 0; histograms[drop].T < mint; drop++ { // nolint:revive
}
ev.currentSamples -= drop
copy(histograms, histograms[drop:])
histograms = histograms[:len(histograms)-drop]
ev.currentSamples -= totalHPointSize(histograms)
// Only append points with timestamps after the last timestamp we have.
mintHistograms = histograms[len(histograms)-1].T + 1
} else {
ev.currentSamples -= len(histograms)
ev.currentSamples -= totalHPointSize(histograms)
if histograms != nil {
histograms = histograms[:0]
}

@ -2075,11 +2086,12 @@ loop:
if ev.currentSamples >= ev.maxSamples {
ev.error(ErrTooManySamples(env))
}
ev.currentSamples++
point := HPoint{T: t, H: h}
if histograms == nil {
histograms = getHPointSlice(16)
}
histograms = append(histograms, HPoint{T: t, H: h})
histograms = append(histograms, point)
ev.currentSamples += point.size()
}
case chunkenc.ValFloat:
t, f := buf.At()

@ -2110,8 +2122,9 @@ loop:
if histograms == nil {
histograms = getHPointSlice(16)
}
histograms = append(histograms, HPoint{T: t, H: h})
ev.currentSamples++
point := HPoint{T: t, H: h}
histograms = append(histograms, point)
ev.currentSamples += point.size()
}
case chunkenc.ValFloat:
t, f := it.At()

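The engine hunks above all replace flat len(histograms) counting with size-weighted counting. A runnable sketch of that weighting, using stand-in types (hist, hPoint, and the Size formula here are hypothetical; the real types live in promql and model/histogram):

package main

import "fmt"

type hist struct{ spans, deltas int }

// Size stands in for FloatHistogram.Size, which returns the approximate
// byte size of the histogram. The constants here are illustrative only.
func (h hist) Size() int { return 40 + 8*(h.spans+h.deltas) }

type hPoint struct {
	T int64
	H hist
}

// size mirrors HPoint.size from the diff: histogram bytes plus the 8-byte
// timestamp, expressed in units of one 16-byte float sample.
func (p hPoint) size() int { return (p.H.Size() + 8) / 16 }

func totalHPointSize(pts []hPoint) int {
	total := 0
	for _, p := range pts {
		total += p.size()
	}
	return total
}

func main() {
	pts := []hPoint{{T: 1000, H: hist{spans: 4, deltas: 20}}, {T: 2000, H: hist{spans: 4, deltas: 20}}}
	fmt.Println("old accounting:", len(pts))              // 2: each point counted as one sample
	fmt.Println("new accounting:", totalHPointSize(pts))  // 30: each point weighted by its footprint
}

Under this scheme a single large native histogram can consume a meaningful share of the query's max-samples budget, which is the point of the change.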
@ -88,7 +88,11 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
return enh.Out, annos.Add(annotations.NewMixedFloatsHistogramsWarning(metricName, args[0].PositionRange()))
}

if isCounter && !strings.HasSuffix(metricName, "_total") && !strings.HasSuffix(metricName, "_sum") && !strings.HasSuffix(metricName, "_count") {
if isCounter && metricName != "" && len(samples.Floats) > 0 &&
!strings.HasSuffix(metricName, "_total") &&
!strings.HasSuffix(metricName, "_sum") &&
!strings.HasSuffix(metricName, "_count") &&
!strings.HasSuffix(metricName, "_bucket") {
annos.Add(annotations.NewPossibleNonCounterInfo(metricName, args[0].PositionRange()))
}

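A toy version of the widened heuristic above (names assumed for illustration): the "possible non-counter" info annotation is now skipped for empty metric names, histogram-only series, and the _bucket suffix in addition to _total/_sum/_count.

package main

import (
	"fmt"
	"strings"
)

// looksLikeCounter reports whether the metric name carries one of the
// conventional counter suffixes that suppress the annotation.
func looksLikeCounter(name string) bool {
	for _, s := range []string{"_total", "_sum", "_count", "_bucket"} {
		if strings.HasSuffix(name, s) {
			return true
		}
	}
	return false
}

func main() {
	for _, n := range []string{"http_requests_total", "latency_bucket", "queue_depth"} {
		fmt.Println(n, looksLikeCounter(n)) // only queue_depth would draw the annotation
	}
}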
@ -1168,10 +1172,14 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev

for _, mb := range enh.signatureToMetricWithBuckets {
if len(mb.buckets) > 0 {
res, forcedMonotonicity := bucketQuantile(q, mb.buckets)
enh.Out = append(enh.Out, Sample{
Metric: mb.metric,
F: bucketQuantile(q, mb.buckets),
F: res,
})
if forcedMonotonicity {
annos.Add(annotations.NewHistogramQuantileForcedMonotonicityInfo(mb.metric.Get(labels.MetricName), args[1].PositionRange()))
}
}
}

@ -71,15 +71,17 @@ type metricWithBuckets struct {
// If q<0, -Inf is returned.
//
// If q>1, +Inf is returned.
func bucketQuantile(q float64, buckets buckets) float64 {
//
// We also return a bool to indicate if monotonicity needed to be forced.
func bucketQuantile(q float64, buckets buckets) (float64, bool) {
if math.IsNaN(q) {
return math.NaN()
return math.NaN(), false
}
if q < 0 {
return math.Inf(-1)
return math.Inf(-1), false
}
if q > 1 {
return math.Inf(+1)
return math.Inf(+1), false
}
slices.SortFunc(buckets, func(a, b bucket) int {
// We don't expect the bucket boundary to be a NaN.

@ -92,27 +94,27 @@ func bucketQuantile(q float64, buckets buckets) float64 {
return 0
})
if !math.IsInf(buckets[len(buckets)-1].upperBound, +1) {
return math.NaN()
return math.NaN(), false
}

buckets = coalesceBuckets(buckets)
ensureMonotonic(buckets)
forcedMonotonic := ensureMonotonic(buckets)

if len(buckets) < 2 {
return math.NaN()
return math.NaN(), false
}
observations := buckets[len(buckets)-1].count
if observations == 0 {
return math.NaN()
return math.NaN(), false
}
rank := q * observations
b := sort.Search(len(buckets)-1, func(i int) bool { return buckets[i].count >= rank })

if b == len(buckets)-1 {
return buckets[len(buckets)-2].upperBound
return buckets[len(buckets)-2].upperBound, forcedMonotonic
}
if b == 0 && buckets[0].upperBound <= 0 {
return buckets[0].upperBound
return buckets[0].upperBound, forcedMonotonic
}
var (
bucketStart float64

@ -124,7 +126,7 @@ func bucketQuantile(q float64, buckets buckets) float64 {
count -= buckets[b-1].count
rank -= buckets[b-1].count
}
return bucketStart + (bucketEnd-bucketStart)*(rank/count)
return bucketStart + (bucketEnd-bucketStart)*(rank/count), forcedMonotonic
}

// histogramQuantile calculates the quantile 'q' based on the given histogram.

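Worked numbers (assumed bucket data, not from the source) for the linear interpolation on the last line of bucketQuantile: the remaining rank is positioned proportionally within the bucket that the binary search selected.

package main

import "fmt"

func main() {
	// q = 0.9 over 100 observations, landing in the (500, 1000] bucket,
	// which spans cumulative counts 80..95.
	bucketStart, bucketEnd := 500.0, 1000.0
	rank := 0.9*100 - 80.0 // rank within the bucket: 10
	count := 95.0 - 80.0   // observations in the bucket: 15

	fmt.Println(bucketStart + (bucketEnd-bucketStart)*(rank/count)) // ≈ 833.33
}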
@ -342,37 +344,24 @@ func coalesceBuckets(buckets buckets) buckets {
// The assumption that bucket counts increase monotonically with increasing
// upperBound may be violated during:
//
// * Recording rule evaluation of histogram_quantile, especially when rate()
// has been applied to the underlying bucket timeseries.
// * Evaluation of histogram_quantile computed over federated bucket
// timeseries, especially when rate() has been applied.
//
// This is because scraped data is not made available to rule evaluation or
// federation atomically, so some buckets are computed with data from the
// most recent scrapes, but the other buckets are missing data from the most
// recent scrape.
// - Circumstances where data is already inconsistent at the target's side.
// - Ingestion via the remote write receiver that Prometheus implements.
// - Optimisation of query execution where precision is sacrificed for other
// benefits, not by Prometheus but by systems built on top of it.
//
// Monotonicity is usually guaranteed because if a bucket with upper bound
// u1 has count c1, then any bucket with a higher upper bound u > u1 must
// have counted all c1 observations and perhaps more, so that c >= c1.
//
// Randomly interspersed partial sampling breaks that guarantee, and rate()
// exacerbates it. Specifically, suppose bucket le=1000 has a count of 10 from
// 4 samples but the bucket with le=2000 has a count of 7 from 3 samples. The
// monotonicity is broken. It is exacerbated by rate() because under normal
// operation, cumulative counting of buckets will cause the bucket counts to
// diverge such that small differences from missing samples are not a problem.
// rate() removes this divergence.
// have counted all c1 observations and perhaps more, so that c >= c1.
//
// bucketQuantile depends on that monotonicity to do a binary search for the
// bucket with the φ-quantile count, so breaking the monotonicity
// guarantee causes bucketQuantile() to return undefined (nonsense) results.
//
// As a somewhat hacky solution until ingestion is atomic per scrape, we
// calculate the "envelope" of the histogram buckets, essentially removing
// any decreases in the count between successive buckets.

func ensureMonotonic(buckets buckets) {
// As a somewhat hacky solution, we calculate the "envelope" of the histogram
// buckets, essentially removing any decreases in the count between successive
// buckets. We return a bool to indicate if this monotonicity was forced or not.
func ensureMonotonic(buckets buckets) bool {
forced := false
max := buckets[0].count
for i := 1; i < len(buckets); i++ {
switch {

@ -380,8 +369,10 @@ func ensureMonotonic(buckets buckets) {
max = buckets[i].count
case buckets[i].count < max:
buckets[i].count = max
forced = true
}
}
return forced
}

// quantile calculates the given quantile of a vector of samples.

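A runnable sketch (standalone stand-ins, not the promql package types) of the "envelope" repair that ensureMonotonic performs, now reporting whether it had to change anything:

package main

import "fmt"

type bucket struct {
	upperBound float64
	count      float64
}

func ensureMonotonic(buckets []bucket) bool {
	forced := false
	max := buckets[0].count
	for i := 1; i < len(buckets); i++ {
		switch {
		case buckets[i].count > max:
			max = buckets[i].count
		case buckets[i].count < max:
			buckets[i].count = max // lift the dip up to the running maximum
			forced = true
		}
	}
	return forced
}

func main() {
	bs := []bucket{{1000, 10}, {2000, 7}, {5000, 12}} // le=2000 dips below le=1000
	fmt.Println(ensureMonotonic(bs), bs)              // true [{1000 10} {2000 10} {5000 12}]
}

The returned bool is what funcHistogramQuantile uses above to emit the forced-monotonicity info annotation, so users learn their buckets were inconsistent.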
@ -168,6 +168,23 @@ func (p HPoint) MarshalJSON() ([]byte, error) {
return json.Marshal([...]interface{}{float64(p.T) / 1000, h})
}

// size returns the size of the HPoint compared to the size of an FPoint.
// The total size is calculated considering the histogram timestamp (p.T, 8 bytes),
// and then a number of bytes in the histogram.
// This sum is divided by 16, as samples are 16 bytes.
func (p HPoint) size() int {
return (p.H.Size() + 8) / 16
}

// totalHPointSize returns the total number of samples in the given slice of HPoints.
func totalHPointSize(histograms []HPoint) int {
var total int
for _, h := range histograms {
total += h.size()
}
return total
}

// Sample is a single sample belonging to a metric. It represents either a float
// sample or a histogram sample. If H is nil, it is a float sample. Otherwise,
// it is a histogram sample.

@ -226,6 +243,21 @@ func (vec Vector) String() string {
return strings.Join(entries, "\n")
}

// TotalSamples returns the total number of samples in the series within a vector.
// Float samples have a weight of 1 in this number, while histogram samples have a higher
// weight according to their size compared with the size of a float sample.
// See HPoint.size for details.
func (vec Vector) TotalSamples() int {
numSamples := 0
for _, sample := range vec {
numSamples++
if sample.H != nil {
numSamples += sample.H.Size() / 16
}
}
return numSamples
}

// ContainsSameLabelset checks if a vector has samples with the same labelset
// Such a behavior is semantically undefined
// https://github.com/prometheus/prometheus/issues/4562

@ -264,10 +296,13 @@ func (m Matrix) String() string {
}

// TotalSamples returns the total number of samples in the series within a matrix.
// Float samples have a weight of 1 in this number, while histogram samples have a higher
// weight according to their size compared with the size of a float sample.
// See HPoint.size for details.
func (m Matrix) TotalSamples() int {
numSamples := 0
for _, series := range m {
numSamples += len(series.Floats) + len(series.Histograms)
numSamples += len(series.Floats) + totalHPointSize(series.Histograms)
}
return numSamples
}

863
rules/group.go
Normal file
@ -0,0 +1,863 @@
|
|||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package rules
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/model"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/codes"
|
||||
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/model/timestamp"
|
||||
"github.com/prometheus/prometheus/model/value"
|
||||
"github.com/prometheus/prometheus/promql"
|
||||
"github.com/prometheus/prometheus/storage"
|
||||
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
||||
)
|
||||
|
||||
// Group is a set of rules that have a logical relation.
|
||||
type Group struct {
|
||||
name string
|
||||
file string
|
||||
interval time.Duration
|
||||
limit int
|
||||
rules []Rule
|
||||
seriesInPreviousEval []map[string]labels.Labels // One per Rule.
|
||||
staleSeries []labels.Labels
|
||||
opts *ManagerOptions
|
||||
mtx sync.Mutex
|
||||
evaluationTime time.Duration
|
||||
lastEvaluation time.Time // Wall-clock time of most recent evaluation.
|
||||
lastEvalTimestamp time.Time // Time slot used for most recent evaluation.
|
||||
|
||||
shouldRestore bool
|
||||
|
||||
markStale bool
|
||||
done chan struct{}
|
||||
terminated chan struct{}
|
||||
managerDone chan struct{}
|
||||
|
||||
logger log.Logger
|
||||
|
||||
metrics *Metrics
|
||||
|
||||
// Rule group evaluation iteration function,
|
||||
// defaults to DefaultEvalIterationFunc.
|
||||
evalIterationFunc GroupEvalIterationFunc
|
||||
}
|
||||
|
||||
// GroupEvalIterationFunc is used to implement and extend rule group
|
||||
// evaluation iteration logic. It is configured in Group.evalIterationFunc,
|
||||
// and periodically invoked at each group evaluation interval to
|
||||
// evaluate the rules in the group at that point in time.
|
||||
// DefaultEvalIterationFunc is the default implementation.
|
||||
type GroupEvalIterationFunc func(ctx context.Context, g *Group, evalTimestamp time.Time)
|
||||
|
||||
type GroupOptions struct {
|
||||
Name, File string
|
||||
Interval time.Duration
|
||||
Limit int
|
||||
Rules []Rule
|
||||
ShouldRestore bool
|
||||
Opts *ManagerOptions
|
||||
done chan struct{}
|
||||
EvalIterationFunc GroupEvalIterationFunc
|
||||
}
|
||||
|
||||
// NewGroup makes a new Group with the given name, options, and rules.
|
||||
func NewGroup(o GroupOptions) *Group {
|
||||
metrics := o.Opts.Metrics
|
||||
if metrics == nil {
|
||||
metrics = NewGroupMetrics(o.Opts.Registerer)
|
||||
}
|
||||
|
||||
key := GroupKey(o.File, o.Name)
|
||||
metrics.IterationsMissed.WithLabelValues(key)
|
||||
metrics.IterationsScheduled.WithLabelValues(key)
|
||||
metrics.EvalTotal.WithLabelValues(key)
|
||||
metrics.EvalFailures.WithLabelValues(key)
|
||||
metrics.GroupLastEvalTime.WithLabelValues(key)
|
||||
metrics.GroupLastDuration.WithLabelValues(key)
|
||||
metrics.GroupRules.WithLabelValues(key).Set(float64(len(o.Rules)))
|
||||
metrics.GroupSamples.WithLabelValues(key)
|
||||
metrics.GroupInterval.WithLabelValues(key).Set(o.Interval.Seconds())
|
||||
|
||||
evalIterationFunc := o.EvalIterationFunc
|
||||
if evalIterationFunc == nil {
|
||||
evalIterationFunc = DefaultEvalIterationFunc
|
||||
}
|
||||
|
||||
return &Group{
|
||||
name: o.Name,
|
||||
file: o.File,
|
||||
interval: o.Interval,
|
||||
limit: o.Limit,
|
||||
rules: o.Rules,
|
||||
shouldRestore: o.ShouldRestore,
|
||||
opts: o.Opts,
|
||||
seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)),
|
||||
done: make(chan struct{}),
|
||||
managerDone: o.done,
|
||||
terminated: make(chan struct{}),
|
||||
logger: log.With(o.Opts.Logger, "file", o.File, "group", o.Name),
|
||||
metrics: metrics,
|
||||
evalIterationFunc: evalIterationFunc,
|
||||
}
|
||||
}
|
||||
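NewGroup pre-creates a labelled child of every metric vec so each series exists at zero before the first evaluation. A minimal usage sketch of that pattern with client_golang (the group key value is a made-up example):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()

	// Stand-in for one of the vecs built in NewGroupMetrics(reg).
	evalTotal := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "prometheus",
			Name:      "rule_evaluations_total",
			Help:      "The total number of rule evaluations.",
		},
		[]string{"rule_group"},
	)
	reg.MustRegister(evalTotal)

	// WithLabelValues instantiates the per-group child, as NewGroup does
	// with GroupKey(file, name); Inc is what Eval does per rule.
	evalTotal.WithLabelValues("rules.yml;example").Inc()

	mfs, _ := reg.Gather()
	fmt.Println(len(mfs)) // 1: the family is exported even before any scrape
}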

// Name returns the group name.
func (g *Group) Name() string { return g.name }

// File returns the group's file.
func (g *Group) File() string { return g.file }

// Rules returns the group's rules.
func (g *Group) Rules() []Rule { return g.rules }

// Queryable returns the group's queryable.
func (g *Group) Queryable() storage.Queryable { return g.opts.Queryable }

// Context returns the group's context.
func (g *Group) Context() context.Context { return g.opts.Context }

// Interval returns the group's interval.
func (g *Group) Interval() time.Duration { return g.interval }

// Limit returns the group's limit.
func (g *Group) Limit() int { return g.limit }

func (g *Group) Logger() log.Logger { return g.logger }

func (g *Group) run(ctx context.Context) {
defer close(g.terminated)

// Wait an initial amount to have consistently slotted intervals.
evalTimestamp := g.EvalTimestamp(time.Now().UnixNano()).Add(g.interval)
select {
case <-time.After(time.Until(evalTimestamp)):
case <-g.done:
return
}

ctx = promql.NewOriginContext(ctx, map[string]interface{}{
"ruleGroup": map[string]string{
"file": g.File(),
"name": g.Name(),
},
})

// The assumption here is that since the ticker was started after having
// waited for `evalTimestamp` to pass, the ticks will trigger soon
// after each `evalTimestamp + N * g.interval` occurrence.
tick := time.NewTicker(g.interval)
defer tick.Stop()

defer func() {
if !g.markStale {
return
}
go func(now time.Time) {
for _, rule := range g.seriesInPreviousEval {
for _, r := range rule {
g.staleSeries = append(g.staleSeries, r)
}
}
// That can be garbage collected at this point.
g.seriesInPreviousEval = nil
// Wait for 2 intervals to give the opportunity to renamed rules
// to insert new series in the tsdb. At this point if there is a
// renamed rule, it should already be started.
select {
case <-g.managerDone:
case <-time.After(2 * g.interval):
g.cleanupStaleSeries(ctx, now)
}
}(time.Now())
}()

g.evalIterationFunc(ctx, g, evalTimestamp)
if g.shouldRestore {
// If we have to restore, we wait for another Eval to finish.
// The reason behind this is, during first eval (or before it)
// we might not have enough data scraped, and recording rules would not
// have updated the latest values, on which some alerts might depend.
select {
case <-g.done:
return
case <-tick.C:
missed := (time.Since(evalTimestamp) / g.interval) - 1
if missed > 0 {
g.metrics.IterationsMissed.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed))
g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed))
}
evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval)
g.evalIterationFunc(ctx, g, evalTimestamp)
}

g.RestoreForState(time.Now())
g.shouldRestore = false
}

for {
select {
case <-g.done:
return
default:
select {
case <-g.done:
return
case <-tick.C:
missed := (time.Since(evalTimestamp) / g.interval) - 1
if missed > 0 {
g.metrics.IterationsMissed.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed))
g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed))
}
evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval)

g.evalIterationFunc(ctx, g, evalTimestamp)
}
}
}
}
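A quick sketch of the missed-iteration arithmetic in run(): when a tick arrives late, integer division of the elapsed time by the interval tells how many whole evaluation slots were skipped, and the next evaluation timestamp jumps past them (values below are made up):

package main

import (
	"fmt"
	"time"
)

func main() {
	interval := 30 * time.Second
	elapsed := 95 * time.Second // time since the last scheduled evaluation

	missed := int64(elapsed/interval) - 1
	fmt.Println(missed) // 2: two whole slots were skipped before this tick

	// The next evaluation timestamp advances past the missed slots, keeping
	// the schedule aligned to the original grid.
	next := time.Duration(missed+1) * interval
	fmt.Println(next) // 1m30s after the previous evalTimestamp
}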

func (g *Group) stop() {
close(g.done)
<-g.terminated
}

func (g *Group) hash() uint64 {
l := labels.New(
labels.Label{Name: "name", Value: g.name},
labels.Label{Name: "file", Value: g.file},
)
return l.Hash()
}

// AlertingRules returns the list of the group's alerting rules.
func (g *Group) AlertingRules() []*AlertingRule {
g.mtx.Lock()
defer g.mtx.Unlock()

var alerts []*AlertingRule
for _, rule := range g.rules {
if alertingRule, ok := rule.(*AlertingRule); ok {
alerts = append(alerts, alertingRule)
}
}
slices.SortFunc(alerts, func(a, b *AlertingRule) int {
if a.State() == b.State() {
return strings.Compare(a.Name(), b.Name())
}
return int(b.State() - a.State())
})
return alerts
}

// HasAlertingRules returns true if the group contains at least one AlertingRule.
func (g *Group) HasAlertingRules() bool {
g.mtx.Lock()
defer g.mtx.Unlock()

for _, rule := range g.rules {
if _, ok := rule.(*AlertingRule); ok {
return true
}
}
return false
}

// GetEvaluationTime returns the time in seconds it took to evaluate the rule group.
func (g *Group) GetEvaluationTime() time.Duration {
g.mtx.Lock()
defer g.mtx.Unlock()
return g.evaluationTime
}

// setEvaluationTime sets the time in seconds the last evaluation took.
func (g *Group) setEvaluationTime(dur time.Duration) {
g.metrics.GroupLastDuration.WithLabelValues(GroupKey(g.file, g.name)).Set(dur.Seconds())

g.mtx.Lock()
defer g.mtx.Unlock()
g.evaluationTime = dur
}

// GetLastEvaluation returns the time the last evaluation of the rule group took place.
func (g *Group) GetLastEvaluation() time.Time {
g.mtx.Lock()
defer g.mtx.Unlock()
return g.lastEvaluation
}

// setLastEvaluation updates evaluationTimestamp to the timestamp of when the rule group was last evaluated.
func (g *Group) setLastEvaluation(ts time.Time) {
g.metrics.GroupLastEvalTime.WithLabelValues(GroupKey(g.file, g.name)).Set(float64(ts.UnixNano()) / 1e9)

g.mtx.Lock()
defer g.mtx.Unlock()
g.lastEvaluation = ts
}

// GetLastEvalTimestamp returns the timestamp of the last evaluation.
func (g *Group) GetLastEvalTimestamp() time.Time {
g.mtx.Lock()
defer g.mtx.Unlock()
return g.lastEvalTimestamp
}

// setLastEvalTimestamp updates lastEvalTimestamp to the timestamp of the last evaluation.
func (g *Group) setLastEvalTimestamp(ts time.Time) {
g.mtx.Lock()
defer g.mtx.Unlock()
g.lastEvalTimestamp = ts
}

// EvalTimestamp returns the immediately preceding consistently slotted evaluation time.
func (g *Group) EvalTimestamp(startTime int64) time.Time {
var (
offset = int64(g.hash() % uint64(g.interval))

// This group's evaluation times differ from the perfect time intervals by `offset` nanoseconds.
// But we can only use `% interval` to align with the interval. And `% interval` will always
// align with the perfect time intervals, instead of this group's. Because of this we add
// `offset` _after_ aligning with the perfect time interval.
//
// There can be cases where adding `offset` to the perfect evaluation time can yield a
// timestamp in the future, which is not what EvalTimestamp should do.
// So we subtract one `offset` to make sure that `now - (now % interval) + offset` gives an
// evaluation time in the past.
adjNow = startTime - offset

// Adjust to perfect evaluation intervals.
base = adjNow - (adjNow % int64(g.interval))

// Add one offset to randomize the evaluation times of this group.
next = base + offset
)

return time.Unix(0, next).UTC()
}

func nameAndLabels(rule Rule) string {
return rule.Name() + rule.Labels().String()
}

// CopyState copies the alerting rule and staleness related state from the given group.
//
// Rules are matched based on their name and labels. If there are duplicates, the
// first is matched with the first, second with the second etc.
func (g *Group) CopyState(from *Group) {
g.evaluationTime = from.evaluationTime
g.lastEvaluation = from.lastEvaluation

ruleMap := make(map[string][]int, len(from.rules))

for fi, fromRule := range from.rules {
nameAndLabels := nameAndLabels(fromRule)
l := ruleMap[nameAndLabels]
ruleMap[nameAndLabels] = append(l, fi)
}

for i, rule := range g.rules {
nameAndLabels := nameAndLabels(rule)
indexes := ruleMap[nameAndLabels]
if len(indexes) == 0 {
continue
}
fi := indexes[0]
g.seriesInPreviousEval[i] = from.seriesInPreviousEval[fi]
ruleMap[nameAndLabels] = indexes[1:]

ar, ok := rule.(*AlertingRule)
if !ok {
continue
}
far, ok := from.rules[fi].(*AlertingRule)
if !ok {
continue
}

for fp, a := range far.active {
ar.active[fp] = a
}
}

// Handle deleted and unmatched duplicate rules.
g.staleSeries = from.staleSeries
for fi, fromRule := range from.rules {
nameAndLabels := nameAndLabels(fromRule)
l := ruleMap[nameAndLabels]
if len(l) != 0 {
for _, series := range from.seriesInPreviousEval[fi] {
g.staleSeries = append(g.staleSeries, series)
}
}
}
}

// Eval runs a single evaluation cycle in which all rules are evaluated sequentially.
func (g *Group) Eval(ctx context.Context, ts time.Time) {
var samplesTotal float64
for i, rule := range g.rules {
select {
case <-g.done:
return
default:
}

func(i int, rule Rule) {
ctx, sp := otel.Tracer("").Start(ctx, "rule")
sp.SetAttributes(attribute.String("name", rule.Name()))
defer func(t time.Time) {
sp.End()

since := time.Since(t)
g.metrics.EvalDuration.Observe(since.Seconds())
rule.SetEvaluationDuration(since)
rule.SetEvaluationTimestamp(t)
}(time.Now())

g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()

vector, err := rule.Eval(ctx, ts, g.opts.QueryFunc, g.opts.ExternalURL, g.Limit())
if err != nil {
rule.SetHealth(HealthBad)
rule.SetLastError(err)
sp.SetStatus(codes.Error, err.Error())
g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()

// Canceled queries are intentional termination of queries. This normally
// happens on shutdown and thus we skip logging of any errors here.
var eqc promql.ErrQueryCanceled
if !errors.As(err, &eqc) {
level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Evaluating rule failed", "rule", rule, "err", err)
}
return
}
rule.SetHealth(HealthGood)
rule.SetLastError(nil)
samplesTotal += float64(len(vector))

if ar, ok := rule.(*AlertingRule); ok {
ar.sendAlerts(ctx, ts, g.opts.ResendDelay, g.interval, g.opts.NotifyFunc)
}
var (
numOutOfOrder = 0
numTooOld = 0
numDuplicates = 0
)

app := g.opts.Appendable.Appender(ctx)
seriesReturned := make(map[string]labels.Labels, len(g.seriesInPreviousEval[i]))
defer func() {
if err := app.Commit(); err != nil {
rule.SetHealth(HealthBad)
rule.SetLastError(err)
sp.SetStatus(codes.Error, err.Error())
g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()

level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule sample appending failed", "err", err)
return
}
g.seriesInPreviousEval[i] = seriesReturned
}()

for _, s := range vector {
if s.H != nil {
_, err = app.AppendHistogram(0, s.Metric, s.T, nil, s.H)
} else {
_, err = app.Append(0, s.Metric, s.T, s.F)
}

if err != nil {
rule.SetHealth(HealthBad)
rule.SetLastError(err)
sp.SetStatus(codes.Error, err.Error())
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
switch {
case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample):
numOutOfOrder++
level.Debug(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule evaluation result discarded", "err", err, "sample", s)
case errors.Is(unwrappedErr, storage.ErrTooOldSample):
numTooOld++
level.Debug(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule evaluation result discarded", "err", err, "sample", s)
case errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
numDuplicates++
level.Debug(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule evaluation result discarded", "err", err, "sample", s)
default:
level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule evaluation result discarded", "err", err, "sample", s)
}
} else {
buf := [1024]byte{}
seriesReturned[string(s.Metric.Bytes(buf[:]))] = s.Metric
}
}
if numOutOfOrder > 0 {
level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Error on ingesting out-of-order result from rule evaluation", "numDropped", numOutOfOrder)
}
if numTooOld > 0 {
level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Error on ingesting too old result from rule evaluation", "numDropped", numTooOld)
}
if numDuplicates > 0 {
level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Error on ingesting results from rule evaluation with different value but same timestamp", "numDropped", numDuplicates)
}

for metric, lset := range g.seriesInPreviousEval[i] {
if _, ok := seriesReturned[metric]; !ok {
// Series no longer exposed, mark it stale.
_, err = app.Append(0, lset, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
switch {
case unwrappedErr == nil:
case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample),
errors.Is(unwrappedErr, storage.ErrTooOldSample),
errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
// Do not count these in logging, as this is expected if series
// is exposed from a different rule.
default:
level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Adding stale sample failed", "sample", lset.String(), "err", err)
}
}
}
}(i, rule)
}
if g.metrics != nil {
g.metrics.GroupSamples.WithLabelValues(GroupKey(g.File(), g.Name())).Set(samplesTotal)
}
g.cleanupStaleSeries(ctx, ts)
}

func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) {
if len(g.staleSeries) == 0 {
return
}
app := g.opts.Appendable.Appender(ctx)
for _, s := range g.staleSeries {
// Rule that produced series no longer configured, mark it stale.
_, err := app.Append(0, s, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
switch {
case unwrappedErr == nil:
case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample),
errors.Is(unwrappedErr, storage.ErrTooOldSample),
errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
// Do not count these in logging, as this is expected if series
// is exposed from a different rule.
default:
level.Warn(g.logger).Log("msg", "Adding stale sample for previous configuration failed", "sample", s, "err", err)
}
}
if err := app.Commit(); err != nil {
level.Warn(g.logger).Log("msg", "Stale sample appending for previous configuration failed", "err", err)
} else {
g.staleSeries = nil
}
}
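The staleness handling above works by appending value.StaleNaN, a NaN with a fixed bit pattern that PromQL treats as the end of a series. A small sketch of the trick (the constant mirrors prometheus/model/value.StaleNaN):

package main

import (
	"fmt"
	"math"
)

// StaleNaN mirrors model/value.StaleNaN: a NaN with a distinguished payload
// used as an explicit staleness marker.
const StaleNaN uint64 = 0x7ff0000000000002

func main() {
	v := math.Float64frombits(StaleNaN)
	fmt.Println(math.IsNaN(v))                   // true: still a NaN for ordinary math
	fmt.Println(math.Float64bits(v) == StaleNaN) // true: but recognizable by its bits
}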

// RestoreForState restores the 'for' state of the alerts
// by looking up last ActiveAt from storage.
func (g *Group) RestoreForState(ts time.Time) {
maxtMS := int64(model.TimeFromUnixNano(ts.UnixNano()))
// We allow restoration only if alerts were active after a certain time (ts minus the outage tolerance).
mint := ts.Add(-g.opts.OutageTolerance)
mintMS := int64(model.TimeFromUnixNano(mint.UnixNano()))
q, err := g.opts.Queryable.Querier(mintMS, maxtMS)
if err != nil {
level.Error(g.logger).Log("msg", "Failed to get Querier", "err", err)
return
}
defer func() {
if err := q.Close(); err != nil {
level.Error(g.logger).Log("msg", "Failed to close Querier", "err", err)
}
}()

for _, rule := range g.Rules() {
alertRule, ok := rule.(*AlertingRule)
if !ok {
continue
}

alertHoldDuration := alertRule.HoldDuration()
if alertHoldDuration < g.opts.ForGracePeriod {
// If alertHoldDuration is already less than grace period, we would not
// like to make it wait for `g.opts.ForGracePeriod` time before firing.
// Hence we skip restoration, which will make it wait for alertHoldDuration.
alertRule.SetRestored(true)
continue
}

alertRule.ForEachActiveAlert(func(a *Alert) {
var s storage.Series

s, err := alertRule.QueryforStateSeries(g.opts.Context, a, q)
if err != nil {
// Querier Warnings are ignored. We do not care unless we have an error.
level.Error(g.logger).Log(
"msg", "Failed to restore 'for' state",
labels.AlertName, alertRule.Name(),
"stage", "Select",
"err", err,
)
return
}

if s == nil {
return
}

// Series found for the 'for' state.
var t int64
var v float64
it := s.Iterator(nil)
for it.Next() == chunkenc.ValFloat {
t, v = it.At()
}
if it.Err() != nil {
level.Error(g.logger).Log("msg", "Failed to restore 'for' state",
labels.AlertName, alertRule.Name(), "stage", "Iterator", "err", it.Err())
return
}
if value.IsStaleNaN(v) { // Alert was not active.
return
}

downAt := time.Unix(t/1000, 0).UTC()
restoredActiveAt := time.Unix(int64(v), 0).UTC()
timeSpentPending := downAt.Sub(restoredActiveAt)
timeRemainingPending := alertHoldDuration - timeSpentPending

switch {
case timeRemainingPending <= 0:
// It means that alert was firing when prometheus went down.
// In the next Eval, the state of this alert will be set back to
// firing again if it's still firing in that Eval.
// Nothing to be done in this case.
case timeRemainingPending < g.opts.ForGracePeriod:
// (new) restoredActiveAt = (ts + m.opts.ForGracePeriod) - alertHoldDuration
// /* new firing time */ /* moving back by hold duration */
//
// Proof of correctness:
// firingTime = restoredActiveAt.Add(alertHoldDuration)
// = ts + m.opts.ForGracePeriod - alertHoldDuration + alertHoldDuration
// = ts + m.opts.ForGracePeriod
//
// Time remaining to fire = firingTime.Sub(ts)
// = (ts + m.opts.ForGracePeriod) - ts
// = m.opts.ForGracePeriod
restoredActiveAt = ts.Add(g.opts.ForGracePeriod).Add(-alertHoldDuration)
default:
// By shifting ActiveAt to the future (ActiveAt + some_duration),
// the total pending time from the original ActiveAt
// would be `alertHoldDuration + some_duration`.
// Here, some_duration = downDuration.
downDuration := ts.Sub(downAt)
restoredActiveAt = restoredActiveAt.Add(downDuration)
}

a.ActiveAt = restoredActiveAt
level.Debug(g.logger).Log("msg", "'for' state restored",
labels.AlertName, alertRule.Name(), "restored_time", a.ActiveAt.Format(time.RFC850),
"labels", a.Labels.String())
})

alertRule.SetRestored(true)
}
}
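A worked example (made-up durations) of the grace-period branch of that switch: when the remaining pending time falls inside the grace period, ActiveAt is moved so the alert fires exactly ForGracePeriod after the restart.

package main

import (
	"fmt"
	"time"
)

func main() {
	ts := time.Date(2023, 9, 1, 12, 0, 0, 0, time.UTC) // restart time
	holdDuration := 10 * time.Minute                   // the alert's `for`
	gracePeriod := 2 * time.Minute                     // opts.ForGracePeriod

	// The branch's formula: move ActiveAt back by the hold duration from
	// ts + grace period.
	restoredActiveAt := ts.Add(gracePeriod).Add(-holdDuration)

	firingTime := restoredActiveAt.Add(holdDuration)
	fmt.Println(firingTime.Sub(ts) == gracePeriod) // true: fires ForGracePeriod after ts
}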

// Equals reports whether two groups are the same.
func (g *Group) Equals(ng *Group) bool {
if g.name != ng.name {
return false
}

if g.file != ng.file {
return false
}

if g.interval != ng.interval {
return false
}

if g.limit != ng.limit {
return false
}

if len(g.rules) != len(ng.rules) {
return false
}

for i, gr := range g.rules {
if gr.String() != ng.rules[i].String() {
return false
}
}

return true
}

// GroupKey returns a unique key for a rule group; group names need not be unique across files.
func GroupKey(file, name string) string {
return file + ";" + name
}

// Constants for instrumentation.
const namespace = "prometheus"

// Metrics for rule evaluation.
type Metrics struct {
EvalDuration prometheus.Summary
IterationDuration prometheus.Summary
IterationsMissed *prometheus.CounterVec
IterationsScheduled *prometheus.CounterVec
EvalTotal *prometheus.CounterVec
EvalFailures *prometheus.CounterVec
GroupInterval *prometheus.GaugeVec
GroupLastEvalTime *prometheus.GaugeVec
GroupLastDuration *prometheus.GaugeVec
GroupRules *prometheus.GaugeVec
GroupSamples *prometheus.GaugeVec
}

// NewGroupMetrics creates a new instance of Metrics and registers it with the provided registerer,
// if not nil.
func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
m := &Metrics{
EvalDuration: prometheus.NewSummary(
prometheus.SummaryOpts{
Namespace: namespace,
Name: "rule_evaluation_duration_seconds",
Help: "The duration for a rule to execute.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}),
IterationDuration: prometheus.NewSummary(prometheus.SummaryOpts{
Namespace: namespace,
Name: "rule_group_duration_seconds",
Help: "The duration of rule group evaluations.",
Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
}),
IterationsMissed: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Name: "rule_group_iterations_missed_total",
Help: "The total number of rule group evaluations missed due to slow rule group evaluation.",
},
[]string{"rule_group"},
),
IterationsScheduled: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Name: "rule_group_iterations_total",
Help: "The total number of scheduled rule group evaluations, whether executed or missed.",
},
[]string{"rule_group"},
),
EvalTotal: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Name: "rule_evaluations_total",
Help: "The total number of rule evaluations.",
},
[]string{"rule_group"},
),
EvalFailures: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Name: "rule_evaluation_failures_total",
Help: "The total number of rule evaluation failures.",
},
[]string{"rule_group"},
),
GroupInterval: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "rule_group_interval_seconds",
Help: "The interval of a rule group.",
},
[]string{"rule_group"},
),
GroupLastEvalTime: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "rule_group_last_evaluation_timestamp_seconds",
Help: "The timestamp of the last rule group evaluation in seconds.",
},
[]string{"rule_group"},
),
GroupLastDuration: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "rule_group_last_duration_seconds",
Help: "The duration of the last rule group evaluation.",
},
[]string{"rule_group"},
),
GroupRules: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "rule_group_rules",
Help: "The number of rules.",
},
[]string{"rule_group"},
),
GroupSamples: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "rule_group_last_evaluation_samples",
Help: "The number of samples returned during the last rule group evaluation.",
},
[]string{"rule_group"},
),
}

if reg != nil {
reg.MustRegister(
m.EvalDuration,
m.IterationDuration,
m.IterationsMissed,
m.IterationsScheduled,
m.EvalTotal,
m.EvalFailures,
m.GroupInterval,
m.GroupLastEvalTime,
m.GroupLastDuration,
m.GroupRules,
m.GroupSamples,
)
}

return m
}
871
rules/manager.go
@ -17,7 +17,6 @@ import (
"context"
"errors"
"fmt"
"math"
"net/url"
"strings"
"sync"

@ -26,162 +25,17 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"golang.org/x/exp/slices"

"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/notifier"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/util/strutil"
)

// RuleHealth describes the health state of a rule.
type RuleHealth string

// The possible health states of a rule based on the last execution.
const (
HealthUnknown RuleHealth = "unknown"
HealthGood RuleHealth = "ok"
HealthBad RuleHealth = "err"
)

// Constants for instrumentation.
const namespace = "prometheus"

// Metrics for rule evaluation.
type Metrics struct {
EvalDuration prometheus.Summary
IterationDuration prometheus.Summary
IterationsMissed *prometheus.CounterVec
IterationsScheduled *prometheus.CounterVec
EvalTotal *prometheus.CounterVec
EvalFailures *prometheus.CounterVec
GroupInterval *prometheus.GaugeVec
GroupLastEvalTime *prometheus.GaugeVec
GroupLastDuration *prometheus.GaugeVec
GroupRules *prometheus.GaugeVec
GroupSamples *prometheus.GaugeVec
}

// NewGroupMetrics creates a new instance of Metrics and registers it with the provided registerer,
// if not nil.
func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
m := &Metrics{
EvalDuration: prometheus.NewSummary(
prometheus.SummaryOpts{
Namespace: namespace,
Name: "rule_evaluation_duration_seconds",
Help: "The duration for a rule to execute.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}),
IterationDuration: prometheus.NewSummary(prometheus.SummaryOpts{
Namespace: namespace,
Name: "rule_group_duration_seconds",
Help: "The duration of rule group evaluations.",
Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
}),
IterationsMissed: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Name: "rule_group_iterations_missed_total",
Help: "The total number of rule group evaluations missed due to slow rule group evaluation.",
},
[]string{"rule_group"},
),
IterationsScheduled: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Name: "rule_group_iterations_total",
Help: "The total number of scheduled rule group evaluations, whether executed or missed.",
},
[]string{"rule_group"},
),
EvalTotal: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Name: "rule_evaluations_total",
Help: "The total number of rule evaluations.",
},
[]string{"rule_group"},
),
EvalFailures: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Name: "rule_evaluation_failures_total",
Help: "The total number of rule evaluation failures.",
},
[]string{"rule_group"},
),
GroupInterval: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "rule_group_interval_seconds",
Help: "The interval of a rule group.",
},
[]string{"rule_group"},
),
GroupLastEvalTime: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "rule_group_last_evaluation_timestamp_seconds",
Help: "The timestamp of the last rule group evaluation in seconds.",
},
[]string{"rule_group"},
),
GroupLastDuration: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "rule_group_last_duration_seconds",
Help: "The duration of the last rule group evaluation.",
},
[]string{"rule_group"},
),
GroupRules: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "rule_group_rules",
Help: "The number of rules.",
},
[]string{"rule_group"},
),
GroupSamples: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "rule_group_last_evaluation_samples",
Help: "The number of samples returned during the last rule group evaluation.",
},
[]string{"rule_group"},
),
}

if reg != nil {
reg.MustRegister(
m.EvalDuration,
m.IterationDuration,
m.IterationsMissed,
m.IterationsScheduled,
m.EvalTotal,
m.EvalFailures,
m.GroupInterval,
m.GroupLastEvalTime,
m.GroupLastDuration,
m.GroupRules,
m.GroupSamples,
)
}

return m
}

// QueryFunc processes PromQL queries.
type QueryFunc func(ctx context.Context, q string, t time.Time) (promql.Vector, error)

@ -213,241 +67,6 @@ func EngineQueryFunc(engine *promql.Engine, q storage.Queryable) QueryFunc {
}
}

// A Rule encapsulates a vector expression which is evaluated at a specified
// interval and acted upon (currently either recorded or used for alerting).
type Rule interface {
Name() string
// Labels of the rule.
Labels() labels.Labels
// eval evaluates the rule, including any associated recording or alerting actions.
Eval(context.Context, time.Time, QueryFunc, *url.URL, int) (promql.Vector, error)
// String returns a human-readable string representation of the rule.
String() string
// Query returns the rule query expression.
Query() parser.Expr
// SetLastErr sets the current error experienced by the rule.
SetLastError(error)
// LastErr returns the last error experienced by the rule.
LastError() error
// SetHealth sets the current health of the rule.
SetHealth(RuleHealth)
// Health returns the current health of the rule.
Health() RuleHealth
SetEvaluationDuration(time.Duration)
// GetEvaluationDuration returns last evaluation duration.
// NOTE: Used dynamically by rules.html template.
GetEvaluationDuration() time.Duration
SetEvaluationTimestamp(time.Time)
// GetEvaluationTimestamp returns last evaluation timestamp.
// NOTE: Used dynamically by rules.html template.
GetEvaluationTimestamp() time.Time
}

// Group is a set of rules that have a logical relation.
type Group struct {
name string
file string
interval time.Duration
limit int
rules []Rule
seriesInPreviousEval []map[string]labels.Labels // One per Rule.
staleSeries []labels.Labels
opts *ManagerOptions
mtx sync.Mutex
evaluationTime time.Duration
lastEvaluation time.Time // Wall-clock time of most recent evaluation.
lastEvalTimestamp time.Time // Time slot used for most recent evaluation.

shouldRestore bool

markStale bool
done chan struct{}
terminated chan struct{}
managerDone chan struct{}

logger log.Logger

metrics *Metrics

// Rule group evaluation iteration function,
// defaults to DefaultEvalIterationFunc.
evalIterationFunc GroupEvalIterationFunc
}

// GroupEvalIterationFunc is used to implement and extend rule group
// evaluation iteration logic. It is configured in Group.evalIterationFunc,
// and periodically invoked at each group evaluation interval to
// evaluate the rules in the group at that point in time.
// DefaultEvalIterationFunc is the default implementation.
type GroupEvalIterationFunc func(ctx context.Context, g *Group, evalTimestamp time.Time)

type GroupOptions struct {
Name, File string
Interval time.Duration
Limit int
Rules []Rule
ShouldRestore bool
Opts *ManagerOptions
done chan struct{}
EvalIterationFunc GroupEvalIterationFunc
}

// NewGroup makes a new Group with the given name, options, and rules.
func NewGroup(o GroupOptions) *Group {
metrics := o.Opts.Metrics
if metrics == nil {
metrics = NewGroupMetrics(o.Opts.Registerer)
}

key := GroupKey(o.File, o.Name)
metrics.IterationsMissed.WithLabelValues(key)
metrics.IterationsScheduled.WithLabelValues(key)
metrics.EvalTotal.WithLabelValues(key)
metrics.EvalFailures.WithLabelValues(key)
metrics.GroupLastEvalTime.WithLabelValues(key)
metrics.GroupLastDuration.WithLabelValues(key)
metrics.GroupRules.WithLabelValues(key).Set(float64(len(o.Rules)))
metrics.GroupSamples.WithLabelValues(key)
metrics.GroupInterval.WithLabelValues(key).Set(o.Interval.Seconds())

evalIterationFunc := o.EvalIterationFunc
if evalIterationFunc == nil {
evalIterationFunc = DefaultEvalIterationFunc
}

return &Group{
name: o.Name,
file: o.File,
interval: o.Interval,
limit: o.Limit,
rules: o.Rules,
shouldRestore: o.ShouldRestore,
opts: o.Opts,
seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)),
done: make(chan struct{}),
managerDone: o.done,
terminated: make(chan struct{}),
logger: log.With(o.Opts.Logger, "file", o.File, "group", o.Name),
metrics: metrics,
evalIterationFunc: evalIterationFunc,
}
}

// Name returns the group name.
func (g *Group) Name() string { return g.name }

// File returns the group's file.
func (g *Group) File() string { return g.file }

// Rules returns the group's rules.
func (g *Group) Rules() []Rule { return g.rules }

// Queryable returns the group's queryable.
func (g *Group) Queryable() storage.Queryable { return g.opts.Queryable }
|
||||
|
||||
// Context returns the group's context.
|
||||
func (g *Group) Context() context.Context { return g.opts.Context }
|
||||
|
||||
// Interval returns the group's interval.
|
||||
func (g *Group) Interval() time.Duration { return g.interval }
|
||||
|
||||
// Limit returns the group's limit.
|
||||
func (g *Group) Limit() int { return g.limit }
|
||||
|
||||
func (g *Group) Logger() log.Logger { return g.logger }
|
||||
|
||||
func (g *Group) run(ctx context.Context) {
    defer close(g.terminated)

    // Wait an initial amount to have consistently slotted intervals.
    evalTimestamp := g.EvalTimestamp(time.Now().UnixNano()).Add(g.interval)
    select {
    case <-time.After(time.Until(evalTimestamp)):
    case <-g.done:
        return
    }

    ctx = promql.NewOriginContext(ctx, map[string]interface{}{
        "ruleGroup": map[string]string{
            "file": g.File(),
            "name": g.Name(),
        },
    })

    // The assumption here is that since the ticker was started after having
    // waited for `evalTimestamp` to pass, the ticks will trigger soon
    // after each `evalTimestamp + N * g.interval` occurrence.
    tick := time.NewTicker(g.interval)
    defer tick.Stop()

    defer func() {
        if !g.markStale {
            return
        }
        go func(now time.Time) {
            for _, rule := range g.seriesInPreviousEval {
                for _, r := range rule {
                    g.staleSeries = append(g.staleSeries, r)
                }
            }
            // That can be garbage collected at this point.
            g.seriesInPreviousEval = nil
            // Wait for 2 intervals to give renamed rules the opportunity
            // to insert new series into the TSDB. At this point, if there is
            // a renamed rule, it should already have started.
            select {
            case <-g.managerDone:
            case <-time.After(2 * g.interval):
                g.cleanupStaleSeries(ctx, now)
            }
        }(time.Now())
    }()

    g.evalIterationFunc(ctx, g, evalTimestamp)
    if g.shouldRestore {
        // If we have to restore, we wait for another Eval to finish.
        // The reason behind this is, during first eval (or before it)
        // we might not have enough data scraped, and recording rules would not
        // have updated the latest values, on which some alerts might depend.
        select {
        case <-g.done:
            return
        case <-tick.C:
            missed := (time.Since(evalTimestamp) / g.interval) - 1
            if missed > 0 {
                g.metrics.IterationsMissed.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed))
                g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed))
            }
            evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval)
            g.evalIterationFunc(ctx, g, evalTimestamp)
        }

        g.RestoreForState(time.Now())
        g.shouldRestore = false
    }

    for {
        select {
        case <-g.done:
            return
        default:
            select {
            case <-g.done:
                return
            case <-tick.C:
                missed := (time.Since(evalTimestamp) / g.interval) - 1
                if missed > 0 {
                    g.metrics.IterationsMissed.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed))
                    g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed))
                }
                evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval)

                g.evalIterationFunc(ctx, g, evalTimestamp)
            }
        }
    }
}

// DefaultEvalIterationFunc is the default implementation of
// GroupEvalIterationFunc that is periodically invoked to evaluate the rules
// in a group at a given point in time and updates Group state and metrics
@@ -467,491 +86,6 @@ func DefaultEvalIterationFunc(ctx context.Context, g *Group, evalTimestamp time.
    g.setLastEvalTimestamp(evalTimestamp)
}

func (g *Group) stop() {
    close(g.done)
    <-g.terminated
}

func (g *Group) hash() uint64 {
    l := labels.New(
        labels.Label{Name: "name", Value: g.name},
        labels.Label{Name: "file", Value: g.file},
    )
    return l.Hash()
}

// AlertingRules returns the list of the group's alerting rules.
func (g *Group) AlertingRules() []*AlertingRule {
    g.mtx.Lock()
    defer g.mtx.Unlock()

    var alerts []*AlertingRule
    for _, rule := range g.rules {
        if alertingRule, ok := rule.(*AlertingRule); ok {
            alerts = append(alerts, alertingRule)
        }
    }
    slices.SortFunc(alerts, func(a, b *AlertingRule) int {
        if a.State() == b.State() {
            return strings.Compare(a.Name(), b.Name())
        }
        return int(b.State() - a.State())
    })
    return alerts
}

// HasAlertingRules returns true if the group contains at least one AlertingRule.
func (g *Group) HasAlertingRules() bool {
    g.mtx.Lock()
    defer g.mtx.Unlock()

    for _, rule := range g.rules {
        if _, ok := rule.(*AlertingRule); ok {
            return true
        }
    }
    return false
}

// GetEvaluationTime returns the time it took to evaluate the rule group.
func (g *Group) GetEvaluationTime() time.Duration {
    g.mtx.Lock()
    defer g.mtx.Unlock()
    return g.evaluationTime
}

// setEvaluationTime sets the time the last evaluation took.
func (g *Group) setEvaluationTime(dur time.Duration) {
    g.metrics.GroupLastDuration.WithLabelValues(GroupKey(g.file, g.name)).Set(dur.Seconds())

    g.mtx.Lock()
    defer g.mtx.Unlock()
    g.evaluationTime = dur
}

// GetLastEvaluation returns the time the last evaluation of the rule group took place.
func (g *Group) GetLastEvaluation() time.Time {
    g.mtx.Lock()
    defer g.mtx.Unlock()
    return g.lastEvaluation
}

// setLastEvaluation updates lastEvaluation to the timestamp of when the rule group was last evaluated.
func (g *Group) setLastEvaluation(ts time.Time) {
    g.metrics.GroupLastEvalTime.WithLabelValues(GroupKey(g.file, g.name)).Set(float64(ts.UnixNano()) / 1e9)

    g.mtx.Lock()
    defer g.mtx.Unlock()
    g.lastEvaluation = ts
}

// GetLastEvalTimestamp returns the timestamp of the last evaluation.
func (g *Group) GetLastEvalTimestamp() time.Time {
    g.mtx.Lock()
    defer g.mtx.Unlock()
    return g.lastEvalTimestamp
}

// setLastEvalTimestamp updates lastEvalTimestamp to the timestamp of the last evaluation.
func (g *Group) setLastEvalTimestamp(ts time.Time) {
    g.mtx.Lock()
    defer g.mtx.Unlock()
    g.lastEvalTimestamp = ts
}

// EvalTimestamp returns the immediately preceding consistently slotted evaluation time.
func (g *Group) EvalTimestamp(startTime int64) time.Time {
    var (
        offset = int64(g.hash() % uint64(g.interval))

        // This group's evaluation times differ from the perfect time intervals by `offset` nanoseconds.
        // But we can only use `% interval` to align with the interval. And `% interval` will always
        // align with the perfect time intervals, instead of this group's. Because of this we add
        // `offset` _after_ aligning with the perfect time interval.
        //
        // There can be cases where adding `offset` to the perfect evaluation time can yield a
        // timestamp in the future, which is not what EvalTimestamp should do.
        // So we subtract one `offset` to make sure that `now - (now % interval) + offset` gives an
        // evaluation time in the past.
        adjNow = startTime - offset

        // Adjust to perfect evaluation intervals.
        base = adjNow - (adjNow % int64(g.interval))

        // Add one offset to randomize the evaluation times of this group.
        next = base + offset
    )

    return time.Unix(0, next).UTC()
}
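
// Worked example with hypothetical numbers: for interval = 1m and a group
// hash giving offset = 17s, a startTime of 10:05:42 yields
//
//    adjNow = 10:05:42 - 17s = 10:05:25
//    base   = 10:05:25 truncated to the minute = 10:05:00
//    next   = 10:05:00 + 17s = 10:05:17
//
// i.e. the last slotted evaluation time for this group, guaranteed to lie
// in the past relative to startTime.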

func nameAndLabels(rule Rule) string {
    return rule.Name() + rule.Labels().String()
}

// CopyState copies the alerting rule and staleness related state from the given group.
//
// Rules are matched based on their name and labels. If there are duplicates, the
// first is matched with the first, second with the second etc.
func (g *Group) CopyState(from *Group) {
    g.evaluationTime = from.evaluationTime
    g.lastEvaluation = from.lastEvaluation

    ruleMap := make(map[string][]int, len(from.rules))

    for fi, fromRule := range from.rules {
        nameAndLabels := nameAndLabels(fromRule)
        l := ruleMap[nameAndLabels]
        ruleMap[nameAndLabels] = append(l, fi)
    }

    for i, rule := range g.rules {
        nameAndLabels := nameAndLabels(rule)
        indexes := ruleMap[nameAndLabels]
        if len(indexes) == 0 {
            continue
        }
        fi := indexes[0]
        g.seriesInPreviousEval[i] = from.seriesInPreviousEval[fi]
        ruleMap[nameAndLabels] = indexes[1:]

        ar, ok := rule.(*AlertingRule)
        if !ok {
            continue
        }
        far, ok := from.rules[fi].(*AlertingRule)
        if !ok {
            continue
        }

        for fp, a := range far.active {
            ar.active[fp] = a
        }
    }

    // Handle deleted and unmatched duplicate rules.
    g.staleSeries = from.staleSeries
    for fi, fromRule := range from.rules {
        nameAndLabels := nameAndLabels(fromRule)
        l := ruleMap[nameAndLabels]
        if len(l) != 0 {
            for _, series := range from.seriesInPreviousEval[fi] {
                g.staleSeries = append(g.staleSeries, series)
            }
        }
    }
}

// Eval runs a single evaluation cycle in which all rules are evaluated sequentially.
func (g *Group) Eval(ctx context.Context, ts time.Time) {
    var samplesTotal float64
    for i, rule := range g.rules {
        select {
        case <-g.done:
            return
        default:
        }

        func(i int, rule Rule) {
            ctx, sp := otel.Tracer("").Start(ctx, "rule")
            sp.SetAttributes(attribute.String("name", rule.Name()))
            defer func(t time.Time) {
                sp.End()

                since := time.Since(t)
                g.metrics.EvalDuration.Observe(since.Seconds())
                rule.SetEvaluationDuration(since)
                rule.SetEvaluationTimestamp(t)
            }(time.Now())

            g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()

            vector, err := rule.Eval(ctx, ts, g.opts.QueryFunc, g.opts.ExternalURL, g.Limit())
            if err != nil {
                rule.SetHealth(HealthBad)
                rule.SetLastError(err)
                sp.SetStatus(codes.Error, err.Error())
                g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()

                // Canceled queries are intentional termination of queries. This normally
                // happens on shutdown and thus we skip logging of any errors here.
                var eqc promql.ErrQueryCanceled
                if !errors.As(err, &eqc) {
                    level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Evaluating rule failed", "rule", rule, "err", err)
                }
                return
            }
            rule.SetHealth(HealthGood)
            rule.SetLastError(nil)
            samplesTotal += float64(len(vector))

            if ar, ok := rule.(*AlertingRule); ok {
                ar.sendAlerts(ctx, ts, g.opts.ResendDelay, g.interval, g.opts.NotifyFunc)
            }
            var (
                numOutOfOrder = 0
                numTooOld     = 0
                numDuplicates = 0
            )

            app := g.opts.Appendable.Appender(ctx)
            seriesReturned := make(map[string]labels.Labels, len(g.seriesInPreviousEval[i]))
            defer func() {
                if err := app.Commit(); err != nil {
                    rule.SetHealth(HealthBad)
                    rule.SetLastError(err)
                    sp.SetStatus(codes.Error, err.Error())
                    g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()

                    level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule sample appending failed", "err", err)
                    return
                }
                g.seriesInPreviousEval[i] = seriesReturned
            }()

            for _, s := range vector {
                if s.H != nil {
                    _, err = app.AppendHistogram(0, s.Metric, s.T, nil, s.H)
                } else {
                    _, err = app.Append(0, s.Metric, s.T, s.F)
                }

                if err != nil {
                    rule.SetHealth(HealthBad)
                    rule.SetLastError(err)
                    sp.SetStatus(codes.Error, err.Error())
                    unwrappedErr := errors.Unwrap(err)
                    if unwrappedErr == nil {
                        unwrappedErr = err
                    }
                    switch {
                    case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample):
                        numOutOfOrder++
                        level.Debug(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule evaluation result discarded", "err", err, "sample", s)
                    case errors.Is(unwrappedErr, storage.ErrTooOldSample):
                        numTooOld++
                        level.Debug(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule evaluation result discarded", "err", err, "sample", s)
                    case errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
                        numDuplicates++
                        level.Debug(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule evaluation result discarded", "err", err, "sample", s)
                    default:
                        level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Rule evaluation result discarded", "err", err, "sample", s)
                    }
                } else {
                    buf := [1024]byte{}
                    seriesReturned[string(s.Metric.Bytes(buf[:]))] = s.Metric
                }
            }
            if numOutOfOrder > 0 {
                level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Error on ingesting out-of-order result from rule evaluation", "numDropped", numOutOfOrder)
            }
            if numTooOld > 0 {
                level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Error on ingesting too old result from rule evaluation", "numDropped", numTooOld)
            }
            if numDuplicates > 0 {
                level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Error on ingesting results from rule evaluation with different value but same timestamp", "numDropped", numDuplicates)
            }

            for metric, lset := range g.seriesInPreviousEval[i] {
                if _, ok := seriesReturned[metric]; !ok {
                    // Series no longer exposed, mark it stale.
                    _, err = app.Append(0, lset, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
                    unwrappedErr := errors.Unwrap(err)
                    if unwrappedErr == nil {
                        unwrappedErr = err
                    }
                    switch {
                    case unwrappedErr == nil:
                    case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample),
                        errors.Is(unwrappedErr, storage.ErrTooOldSample),
                        errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
                        // Do not count these in logging, as this is expected if a series
                        // is exposed from a different rule.
                    default:
                        level.Warn(g.logger).Log("name", rule.Name(), "index", i, "msg", "Adding stale sample failed", "sample", lset.String(), "err", err)
                    }
                }
            }
        }(i, rule)
    }
    if g.metrics != nil {
        g.metrics.GroupSamples.WithLabelValues(GroupKey(g.File(), g.Name())).Set(samplesTotal)
    }
    g.cleanupStaleSeries(ctx, ts)
}

func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) {
    if len(g.staleSeries) == 0 {
        return
    }
    app := g.opts.Appendable.Appender(ctx)
    for _, s := range g.staleSeries {
        // The rule that produced this series is no longer configured, mark it stale.
        _, err := app.Append(0, s, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
        unwrappedErr := errors.Unwrap(err)
        if unwrappedErr == nil {
            unwrappedErr = err
        }
        switch {
        case unwrappedErr == nil:
        case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample),
            errors.Is(unwrappedErr, storage.ErrTooOldSample),
            errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
            // Do not count these in logging, as this is expected if a series
            // is exposed from a different rule.
        default:
            level.Warn(g.logger).Log("msg", "Adding stale sample for previous configuration failed", "sample", s, "err", err)
        }
    }
    if err := app.Commit(); err != nil {
        level.Warn(g.logger).Log("msg", "Stale sample appending for previous configuration failed", "err", err)
    } else {
        g.staleSeries = nil
    }
}

// RestoreForState restores the 'for' state of the alerts
// by looking up the last ActiveAt from storage.
func (g *Group) RestoreForState(ts time.Time) {
    maxtMS := int64(model.TimeFromUnixNano(ts.UnixNano()))
    // We allow restoration only if alerts were active within the outage tolerance window before now.
    mint := ts.Add(-g.opts.OutageTolerance)
    mintMS := int64(model.TimeFromUnixNano(mint.UnixNano()))
    q, err := g.opts.Queryable.Querier(mintMS, maxtMS)
    if err != nil {
        level.Error(g.logger).Log("msg", "Failed to get Querier", "err", err)
        return
    }
    defer func() {
        if err := q.Close(); err != nil {
            level.Error(g.logger).Log("msg", "Failed to close Querier", "err", err)
        }
    }()

    for _, rule := range g.Rules() {
        alertRule, ok := rule.(*AlertingRule)
        if !ok {
            continue
        }

        alertHoldDuration := alertRule.HoldDuration()
        if alertHoldDuration < g.opts.ForGracePeriod {
            // If alertHoldDuration is already less than the grace period, we would not
            // like to make it wait for `g.opts.ForGracePeriod` time before firing.
            // Hence we skip restoration, which will make it wait for alertHoldDuration.
            alertRule.SetRestored(true)
            continue
        }

        alertRule.ForEachActiveAlert(func(a *Alert) {
            var s storage.Series

            s, err := alertRule.QueryforStateSeries(g.opts.Context, a, q)
            if err != nil {
                // Querier warnings are ignored. We do not care unless we have an error.
                level.Error(g.logger).Log(
                    "msg", "Failed to restore 'for' state",
                    labels.AlertName, alertRule.Name(),
                    "stage", "Select",
                    "err", err,
                )
                return
            }

            if s == nil {
                return
            }

            // Series found for the 'for' state.
            var t int64
            var v float64
            it := s.Iterator(nil)
            for it.Next() == chunkenc.ValFloat {
                t, v = it.At()
            }
            if it.Err() != nil {
                level.Error(g.logger).Log("msg", "Failed to restore 'for' state",
                    labels.AlertName, alertRule.Name(), "stage", "Iterator", "err", it.Err())
                return
            }
            if value.IsStaleNaN(v) { // Alert was not active.
                return
            }

            downAt := time.Unix(t/1000, 0).UTC()
            restoredActiveAt := time.Unix(int64(v), 0).UTC()
            timeSpentPending := downAt.Sub(restoredActiveAt)
            timeRemainingPending := alertHoldDuration - timeSpentPending

            switch {
            case timeRemainingPending <= 0:
                // This means that the alert was firing when Prometheus went down.
                // In the next Eval, the state of this alert will be set back to
                // firing again if it's still firing in that Eval.
                // Nothing to be done in this case.
            case timeRemainingPending < g.opts.ForGracePeriod:
                // (new) restoredActiveAt = (ts + m.opts.ForGracePeriod) - alertHoldDuration
                //                          /* new firing time */          /* moving back by hold duration */
                //
                // Proof of correctness:
                // firingTime = restoredActiveAt.Add(alertHoldDuration)
                //            = ts + m.opts.ForGracePeriod - alertHoldDuration + alertHoldDuration
                //            = ts + m.opts.ForGracePeriod
                //
                // Time remaining to fire = firingTime.Sub(ts)
                //                        = (ts + m.opts.ForGracePeriod) - ts
                //                        = m.opts.ForGracePeriod
                restoredActiveAt = ts.Add(g.opts.ForGracePeriod).Add(-alertHoldDuration)
            default:
                // By shifting ActiveAt to the future (ActiveAt + some_duration),
                // the total pending time from the original ActiveAt
                // would be `alertHoldDuration + some_duration`.
                // Here, some_duration = downDuration.
                downDuration := ts.Sub(downAt)
                restoredActiveAt = restoredActiveAt.Add(downDuration)
            }

            a.ActiveAt = restoredActiveAt
            level.Debug(g.logger).Log("msg", "'for' state restored",
                labels.AlertName, alertRule.Name(), "restored_time", a.ActiveAt.Format(time.RFC850),
                "labels", a.Labels.String())
        })

        alertRule.SetRestored(true)
    }
}
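
// Worked example with hypothetical numbers: alertHoldDuration = 10m,
// ForGracePeriod = 1m, an alert pending for 7m before Prometheus was down
// for 5m. Then timeSpentPending = 7m and timeRemainingPending = 3m, which
// exceeds the grace period, so the default branch shifts ActiveAt forward
// by the 5m of downtime: after the restart the alert keeps pending for the
// remaining 3m before it may fire.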

// Equals returns whether two groups are the same.
func (g *Group) Equals(ng *Group) bool {
    if g.name != ng.name {
        return false
    }

    if g.file != ng.file {
        return false
    }

    if g.interval != ng.interval {
        return false
    }

    if g.limit != ng.limit {
        return false
    }

    if len(g.rules) != len(ng.rules) {
        return false
    }

    for i, gr := range g.rules {
        if gr.String() != ng.rules[i].String() {
            return false
        }
    }

    return true
}

// The Manager manages recording and alerting rules.
type Manager struct {
    opts *ManagerOptions
@@ -1191,11 +325,6 @@ func (m *Manager) LoadGroups(
    return groups, nil
}

// GroupKey returns a unique key for the group; group names need not be unique across files.
func GroupKey(file, name string) string {
    return file + ";" + name
}
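
// For example, GroupKey("rules/example.yml", "node-alerts") yields
// "rules/example.yml;node-alerts".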

// RuleGroups returns the list of manager's rule groups.
func (m *Manager) RuleGroups() []*Group {
    m.mtx.RLock()
rules/rule.go (new file, 64 lines)
@@ -0,0 +1,64 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package rules

import (
    "context"
    "net/url"
    "time"

    "github.com/prometheus/prometheus/model/labels"
    "github.com/prometheus/prometheus/promql"
    "github.com/prometheus/prometheus/promql/parser"
)

// RuleHealth describes the health state of a rule.
type RuleHealth string

// The possible health states of a rule based on the last execution.
const (
    HealthUnknown RuleHealth = "unknown"
    HealthGood    RuleHealth = "ok"
    HealthBad     RuleHealth = "err"
)

// A Rule encapsulates a vector expression which is evaluated at a specified
// interval and acted upon (currently either recorded or used for alerting).
type Rule interface {
    Name() string
    // Labels of the rule.
    Labels() labels.Labels
    // Eval evaluates the rule, including any associated recording or alerting actions.
    Eval(context.Context, time.Time, QueryFunc, *url.URL, int) (promql.Vector, error)
    // String returns a human-readable string representation of the rule.
    String() string
    // Query returns the rule query expression.
    Query() parser.Expr
    // SetLastError sets the current error experienced by the rule.
    SetLastError(error)
    // LastError returns the last error experienced by the rule.
    LastError() error
    // SetHealth sets the current health of the rule.
    SetHealth(RuleHealth)
    // Health returns the current health of the rule.
    Health() RuleHealth
    SetEvaluationDuration(time.Duration)
    // GetEvaluationDuration returns last evaluation duration.
    // NOTE: Used dynamically by rules.html template.
    GetEvaluationDuration() time.Duration
    SetEvaluationTimestamp(time.Time)
    // GetEvaluationTimestamp returns last evaluation timestamp.
    // NOTE: Used dynamically by rules.html template.
    GetEvaluationTimestamp() time.Time
}
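
// Both AlertingRule and RecordingRule in this package satisfy this
// interface; a hypothetical compile-time assertion would be:
//
//    var (
//        _ Rule = (*AlertingRule)(nil)
//        _ Rule = (*RecordingRule)(nil)
//    )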

@@ -34,80 +34,20 @@ import (
    "github.com/prometheus/prometheus/util/osutil"
)

-var targetMetadataCache = newMetadataMetricsCollector()
-
-// MetadataMetricsCollector is a Custom Collector for the metadata cache metrics.
-type MetadataMetricsCollector struct {
-    CacheEntries *prometheus.Desc
-    CacheBytes   *prometheus.Desc
-
-    scrapeManager *Manager
-}
-
-func newMetadataMetricsCollector() *MetadataMetricsCollector {
-    return &MetadataMetricsCollector{
-        CacheEntries: prometheus.NewDesc(
-            "prometheus_target_metadata_cache_entries",
-            "Total number of metric metadata entries in the cache",
-            []string{"scrape_job"},
-            nil,
-        ),
-        CacheBytes: prometheus.NewDesc(
-            "prometheus_target_metadata_cache_bytes",
-            "The number of bytes that are currently used for storing metric metadata in the cache",
-            []string{"scrape_job"},
-            nil,
-        ),
-    }
-}
-
-func (mc *MetadataMetricsCollector) registerManager(m *Manager) {
-    mc.scrapeManager = m
-}
-
-// Describe sends the metrics descriptions to the channel.
-func (mc *MetadataMetricsCollector) Describe(ch chan<- *prometheus.Desc) {
-    ch <- mc.CacheEntries
-    ch <- mc.CacheBytes
-}
-
-// Collect creates and sends the metrics for the metadata cache.
-func (mc *MetadataMetricsCollector) Collect(ch chan<- prometheus.Metric) {
-    if mc.scrapeManager == nil {
-        return
-    }
-
-    for tset, targets := range mc.scrapeManager.TargetsActive() {
-        var size, length int
-        for _, t := range targets {
-            size += t.MetadataSize()
-            length += t.MetadataLength()
-        }
-
-        ch <- prometheus.MustNewConstMetric(
-            mc.CacheEntries,
-            prometheus.GaugeValue,
-            float64(length),
-            tset,
-        )
-
-        ch <- prometheus.MustNewConstMetric(
-            mc.CacheBytes,
-            prometheus.GaugeValue,
-            float64(size),
-            tset,
-        )
-    }
-}
-
// NewManager is the Manager constructor
-func NewManager(o *Options, logger log.Logger, app storage.Appendable) *Manager {
+func NewManager(o *Options, logger log.Logger, app storage.Appendable, registerer prometheus.Registerer) (*Manager, error) {
    if o == nil {
        o = &Options{}
    }
    if logger == nil {
        logger = log.NewNopLogger()
    }

+    sm, err := newScrapeMetrics(registerer)
+    if err != nil {
+        return nil, fmt.Errorf("failed to create scrape manager due to error: %w", err)
+    }
+
    m := &Manager{
        append: app,
        opts:   o,
@@ -116,10 +56,12 @@ func NewManager(o *Options, logger log.Logger, app storage.Appendable) *Manager
        scrapePools:   make(map[string]*scrapePool),
        graceShut:     make(chan struct{}),
        triggerReload: make(chan struct{}, 1),
+        metrics:       sm,
    }
-    targetMetadataCache.registerManager(m)
-
-    return m
+    m.metrics.setTargetMetadataCacheGatherer(m)
+
+    return m, nil
}
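
// A hedged usage sketch of the new constructor, mirroring the updated tests
// further below; reg and app are assumed to be created by the caller:
//
//    reg := prometheus.NewRegistry()
//    m, err := NewManager(&Options{}, log.NewNopLogger(), app, reg)
//    if err != nil {
//        // Registration of the scrape metrics failed, e.g. a duplicate registration.
//    }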

// Options are the configuration parameters to the scrape manager.
@@ -132,9 +74,6 @@ type Options struct {
    // Option to enable the experimental in-memory metadata storage and append
    // metadata to the WAL.
    EnableMetadataStorage bool
-    // Option to enable protobuf negotiation with the client. Note that the client can already
-    // send protobuf without needing to enable this.
-    EnableProtobufNegotiation bool
    // Option to increase the interval used by scrape manager to throttle target groups updates.
    DiscoveryReloadInterval model.Duration

@@ -157,6 +96,8 @@ type Manager struct {
    targetSets map[string][]*targetgroup.Group

    triggerReload chan struct{}
+
+    metrics *scrapeMetrics
}

// Run receives and saves target set updates and triggers the scraping loops reloading.
@@ -214,8 +155,10 @@ func (m *Manager) reload() {
            level.Error(m.logger).Log("msg", "error reloading target set", "err", "invalid config id:"+setName)
            continue
        }
-        sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, log.With(m.logger, "scrape_pool", setName), m.opts)
+        m.metrics.targetScrapePools.Inc()
+        sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, log.With(m.logger, "scrape_pool", setName), m.opts, m.metrics)
        if err != nil {
+            m.metrics.targetScrapePoolsFailed.Inc()
            level.Error(m.logger).Log("msg", "error creating new scrape pool", "err", err, "scrape_pool", setName)
            continue
        }
@@ -20,6 +20,7 @@ import (
    "testing"
    "time"

+    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/common/model"
    "github.com/stretchr/testify/require"
    "gopkg.in/yaml.v2"
@@ -492,10 +493,13 @@ scrape_configs:
        cfg3 = loadConfiguration(t, cfgText3)

        ch = make(chan struct{}, 1)
+
+        testRegistry = prometheus.NewRegistry()
    )

    opts := Options{}
-    scrapeManager := NewManager(&opts, nil, nil)
+    scrapeManager, err := NewManager(&opts, nil, nil, testRegistry)
+    require.NoError(t, err)
    newLoop := func(scrapeLoopOptions) loop {
        ch <- struct{}{}
        return noopLoop()
@@ -512,6 +516,7 @@ scrape_configs:
        logger:  nil,
        config:  cfg1.ScrapeConfigs[0],
        client:  http.DefaultClient,
+        metrics: scrapeManager.metrics,
    }
    scrapeManager.scrapePools = map[string]*scrapePool{
        "job1": sp,
@@ -560,7 +565,9 @@ scrape_configs:

func TestManagerTargetsUpdates(t *testing.T) {
    opts := Options{}
-    m := NewManager(&opts, nil, nil)
+    testRegistry := prometheus.NewRegistry()
+    m, err := NewManager(&opts, nil, nil, testRegistry)
+    require.NoError(t, err)

    ts := make(chan map[string][]*targetgroup.Group)
    go m.Run(ts)
@@ -613,7 +620,9 @@ global:
    }

    opts := Options{}
-    scrapeManager := NewManager(&opts, nil, nil)
+    testRegistry := prometheus.NewRegistry()
+    scrapeManager, err := NewManager(&opts, nil, nil, testRegistry)
+    require.NoError(t, err)

    // Load the first config.
    cfg1 := getConfig("ha1")
@@ -658,8 +667,9 @@ scrape_configs:
      - targets: ["foo:9093"]
`
    var (
-        cfg1 = loadConfiguration(t, cfgText1)
-        cfg2 = loadConfiguration(t, cfgText2)
+        cfg1         = loadConfiguration(t, cfgText1)
+        cfg2         = loadConfiguration(t, cfgText2)
+        testRegistry = prometheus.NewRegistry()
    )

    reload := func(scrapeManager *Manager, cfg *config.Config) {
@@ -695,7 +705,8 @@ scrape_configs:
    }

    opts := Options{}
-    scrapeManager := NewManager(&opts, nil, nil)
+    scrapeManager, err := NewManager(&opts, nil, nil, testRegistry)
+    require.NoError(t, err)

    reload(scrapeManager, cfg1)
    require.ElementsMatch(t, []string{"job1", "job2"}, scrapeManager.ScrapePools())
scrape/metrics.go (new file, 307 lines)
@@ -0,0 +1,307 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package scrape

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

type scrapeMetrics struct {
    // Used by Manager.
    targetMetadataCache     *MetadataMetricsCollector
    targetScrapePools       prometheus.Counter
    targetScrapePoolsFailed prometheus.Counter

    // Used by scrapePool.
    targetReloadIntervalLength          *prometheus.SummaryVec
    targetScrapePoolReloads             prometheus.Counter
    targetScrapePoolReloadsFailed       prometheus.Counter
    targetScrapePoolSyncsCounter        *prometheus.CounterVec
    targetScrapePoolExceededTargetLimit prometheus.Counter
    targetScrapePoolTargetLimit         *prometheus.GaugeVec
    targetScrapePoolTargetsAdded        *prometheus.GaugeVec
    targetSyncIntervalLength            *prometheus.SummaryVec
    targetSyncFailed                    *prometheus.CounterVec

    // Used by targetScraper.
    targetScrapeExceededBodySizeLimit prometheus.Counter

    // Used by scrapeCache.
    targetScrapeCacheFlushForced prometheus.Counter

    // Used by scrapeLoop.
    targetIntervalLength                   *prometheus.SummaryVec
    targetScrapeSampleLimit                prometheus.Counter
    targetScrapeSampleDuplicate            prometheus.Counter
    targetScrapeSampleOutOfOrder           prometheus.Counter
    targetScrapeSampleOutOfBounds          prometheus.Counter
    targetScrapeExemplarOutOfOrder         prometheus.Counter
    targetScrapePoolExceededLabelLimits    prometheus.Counter
    targetScrapeNativeHistogramBucketLimit prometheus.Counter
}

func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) {
    sm := &scrapeMetrics{}

    // Manager metrics.
    sm.targetMetadataCache = &MetadataMetricsCollector{
        CacheEntries: prometheus.NewDesc(
            "prometheus_target_metadata_cache_entries",
            "Total number of metric metadata entries in the cache",
            []string{"scrape_job"},
            nil,
        ),
        CacheBytes: prometheus.NewDesc(
            "prometheus_target_metadata_cache_bytes",
            "The number of bytes that are currently used for storing metric metadata in the cache",
            []string{"scrape_job"},
            nil,
        ),
        // TargetsGatherer is set later because of a circular dependency:
        // newScrapeMetrics() is called by NewManager(), and the Manager itself
        // is the TargetsGatherer.
    }

    sm.targetScrapePools = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "prometheus_target_scrape_pools_total",
            Help: "Total number of scrape pool creation attempts.",
        },
    )
    sm.targetScrapePoolsFailed = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "prometheus_target_scrape_pools_failed_total",
            Help: "Total number of scrape pool creations that failed.",
        },
    )

    // Used by scrapePool.
    sm.targetReloadIntervalLength = prometheus.NewSummaryVec(
        prometheus.SummaryOpts{
            Name:       "prometheus_target_reload_length_seconds",
            Help:       "Actual interval to reload the scrape pool with a given configuration.",
            Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
        },
        []string{"interval"},
    )
    sm.targetScrapePoolReloads = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "prometheus_target_scrape_pool_reloads_total",
            Help: "Total number of scrape pool reloads.",
        },
    )
    sm.targetScrapePoolReloadsFailed = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "prometheus_target_scrape_pool_reloads_failed_total",
            Help: "Total number of failed scrape pool reloads.",
        },
    )
    sm.targetScrapePoolExceededTargetLimit = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "prometheus_target_scrape_pool_exceeded_target_limit_total",
            Help: "Total number of times scrape pools hit the target limit, during sync or config reload.",
        },
    )
    sm.targetScrapePoolTargetLimit = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "prometheus_target_scrape_pool_target_limit",
            Help: "Maximum number of targets allowed in this scrape pool.",
        },
        []string{"scrape_job"},
    )
    sm.targetScrapePoolTargetsAdded = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "prometheus_target_scrape_pool_targets",
            Help: "Current number of targets in this scrape pool.",
        },
        []string{"scrape_job"},
    )
    sm.targetScrapePoolSyncsCounter = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "prometheus_target_scrape_pool_sync_total",
            Help: "Total number of syncs that were executed on a scrape pool.",
        },
        []string{"scrape_job"},
    )
    sm.targetSyncIntervalLength = prometheus.NewSummaryVec(
        prometheus.SummaryOpts{
            Name:       "prometheus_target_sync_length_seconds",
            Help:       "Actual interval to sync the scrape pool.",
            Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
        },
        []string{"scrape_job"},
    )
    sm.targetSyncFailed = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "prometheus_target_sync_failed_total",
            Help: "Total number of target sync failures.",
        },
        []string{"scrape_job"},
    )

    // Used by targetScraper.
    sm.targetScrapeExceededBodySizeLimit = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "prometheus_target_scrapes_exceeded_body_size_limit_total",
            Help: "Total number of scrapes that hit the body size limit",
        },
    )

    // Used by scrapeCache.
    sm.targetScrapeCacheFlushForced = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "prometheus_target_scrapes_cache_flush_forced_total",
            Help: "How many times a scrape cache was flushed due to getting big while scrapes are failing.",
        },
    )

    // Used by scrapeLoop.
    sm.targetIntervalLength = prometheus.NewSummaryVec(
        prometheus.SummaryOpts{
            Name:       "prometheus_target_interval_length_seconds",
            Help:       "Actual intervals between scrapes.",
            Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
        },
        []string{"interval"},
    )
    sm.targetScrapeSampleLimit = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "prometheus_target_scrapes_exceeded_sample_limit_total",
            Help: "Total number of scrapes that hit the sample limit and were rejected.",
        },
    )
    sm.targetScrapeSampleDuplicate = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "prometheus_target_scrapes_sample_duplicate_timestamp_total",
            Help: "Total number of samples rejected due to duplicate timestamps but different values.",
        },
    )
    sm.targetScrapeSampleOutOfOrder = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "prometheus_target_scrapes_sample_out_of_order_total",
            Help: "Total number of samples rejected due to not being out of the expected order.",
        },
    )
    sm.targetScrapeSampleOutOfBounds = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "prometheus_target_scrapes_sample_out_of_bounds_total",
            Help: "Total number of samples rejected due to timestamp falling outside of the time bounds.",
        },
    )
    sm.targetScrapePoolExceededLabelLimits = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "prometheus_target_scrape_pool_exceeded_label_limits_total",
            Help: "Total number of times scrape pools hit the label limits, during sync or config reload.",
        },
    )
    sm.targetScrapeNativeHistogramBucketLimit = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "prometheus_target_scrapes_exceeded_native_histogram_bucket_limit_total",
            Help: "Total number of scrapes that hit the native histogram bucket limit and were rejected.",
        },
    )
    sm.targetScrapeExemplarOutOfOrder = prometheus.NewCounter(
        prometheus.CounterOpts{
            Name: "prometheus_target_scrapes_exemplar_out_of_order_total",
            Help: "Total number of exemplar rejected due to not being out of the expected order.",
        },
    )

    for _, collector := range []prometheus.Collector{
        // Used by Manager.
        sm.targetMetadataCache,
        sm.targetScrapePools,
        sm.targetScrapePoolsFailed,
        // Used by scrapePool.
        sm.targetReloadIntervalLength,
        sm.targetScrapePoolReloads,
        sm.targetScrapePoolReloadsFailed,
        sm.targetSyncIntervalLength,
        sm.targetScrapePoolSyncsCounter,
        sm.targetScrapePoolExceededTargetLimit,
        sm.targetScrapePoolTargetLimit,
        sm.targetScrapePoolTargetsAdded,
        sm.targetSyncFailed,
        // Used by targetScraper.
        sm.targetScrapeExceededBodySizeLimit,
        // Used by scrapeCache.
        sm.targetScrapeCacheFlushForced,
        // Used by scrapeLoop.
        sm.targetIntervalLength,
        sm.targetScrapeSampleLimit,
        sm.targetScrapeSampleDuplicate,
        sm.targetScrapeSampleOutOfOrder,
        sm.targetScrapeSampleOutOfBounds,
        sm.targetScrapeExemplarOutOfOrder,
        sm.targetScrapePoolExceededLabelLimits,
        sm.targetScrapeNativeHistogramBucketLimit,
    } {
        err := reg.Register(collector)
        if err != nil {
            return nil, fmt.Errorf("failed to register scrape metrics: %w", err)
        }
    }
    return sm, nil
}

func (sm *scrapeMetrics) setTargetMetadataCacheGatherer(gatherer TargetsGatherer) {
    sm.targetMetadataCache.TargetsGatherer = gatherer
}

type TargetsGatherer interface {
    TargetsActive() map[string][]*Target
}

// MetadataMetricsCollector is a Custom Collector for the metadata cache metrics.
type MetadataMetricsCollector struct {
    CacheEntries    *prometheus.Desc
    CacheBytes      *prometheus.Desc
    TargetsGatherer TargetsGatherer
}

// Describe sends the metrics descriptions to the channel.
func (mc *MetadataMetricsCollector) Describe(ch chan<- *prometheus.Desc) {
    ch <- mc.CacheEntries
    ch <- mc.CacheBytes
}

// Collect creates and sends the metrics for the metadata cache.
func (mc *MetadataMetricsCollector) Collect(ch chan<- prometheus.Metric) {
    if mc.TargetsGatherer == nil {
        return
    }

    for tset, targets := range mc.TargetsGatherer.TargetsActive() {
        var size, length int
        for _, t := range targets {
            size += t.MetadataSize()
            length += t.MetadataLength()
        }

        ch <- prometheus.MustNewConstMetric(
            mc.CacheEntries,
            prometheus.GaugeValue,
            float64(length),
            tset,
        )

        ch <- prometheus.MustNewConstMetric(
            mc.CacheBytes,
            prometheus.GaugeValue,
            float64(size),
            tset,
        )
    }
}
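
// A hedged sketch of how the wired-up collector can be exercised, e.g. in a
// test; mgr is assumed to be a *Manager, which implements TargetsGatherer:
//
//    reg := prometheus.NewRegistry()
//    sm, err := newScrapeMetrics(reg)
//    if err == nil {
//        sm.setTargetMetadataCacheGatherer(mgr)
//        mfs, _ := reg.Gather() // includes the prometheus_target_metadata_cache_* families
//        _ = mfs
//    }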

scrape/scrape.go (348 changed lines)
@@ -24,13 +24,13 @@ import (
    "net/http"
    "reflect"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/go-kit/log"
    "github.com/go-kit/log/level"
    "github.com/pkg/errors"
    "github.com/prometheus/client_golang/prometheus"
    config_util "github.com/prometheus/common/config"
    "github.com/prometheus/common/model"
    "github.com/prometheus/common/version"
@@ -60,172 +60,6 @@ var AlignScrapeTimestamps = true

var errNameLabelMandatory = fmt.Errorf("missing metric name (%s label)", labels.MetricName)

-var (
-    targetIntervalLength = prometheus.NewSummaryVec(
-        prometheus.SummaryOpts{
-            Name:       "prometheus_target_interval_length_seconds",
-            Help:       "Actual intervals between scrapes.",
-            Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
-        },
-        []string{"interval"},
-    )
-    targetReloadIntervalLength = prometheus.NewSummaryVec(
-        prometheus.SummaryOpts{
-            Name:       "prometheus_target_reload_length_seconds",
-            Help:       "Actual interval to reload the scrape pool with a given configuration.",
-            Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
-        },
-        []string{"interval"},
-    )
-    targetScrapePools = prometheus.NewCounter(
-        prometheus.CounterOpts{
-            Name: "prometheus_target_scrape_pools_total",
-            Help: "Total number of scrape pool creation attempts.",
-        },
-    )
-    targetScrapePoolsFailed = prometheus.NewCounter(
-        prometheus.CounterOpts{
-            Name: "prometheus_target_scrape_pools_failed_total",
-            Help: "Total number of scrape pool creations that failed.",
-        },
-    )
-    targetScrapePoolReloads = prometheus.NewCounter(
-        prometheus.CounterOpts{
-            Name: "prometheus_target_scrape_pool_reloads_total",
-            Help: "Total number of scrape pool reloads.",
-        },
-    )
-    targetScrapePoolReloadsFailed = prometheus.NewCounter(
-        prometheus.CounterOpts{
-            Name: "prometheus_target_scrape_pool_reloads_failed_total",
-            Help: "Total number of failed scrape pool reloads.",
-        },
-    )
-    targetScrapePoolExceededTargetLimit = prometheus.NewCounter(
-        prometheus.CounterOpts{
-            Name: "prometheus_target_scrape_pool_exceeded_target_limit_total",
-            Help: "Total number of times scrape pools hit the target limit, during sync or config reload.",
-        },
-    )
-    targetScrapePoolTargetLimit = prometheus.NewGaugeVec(
-        prometheus.GaugeOpts{
-            Name: "prometheus_target_scrape_pool_target_limit",
-            Help: "Maximum number of targets allowed in this scrape pool.",
-        },
-        []string{"scrape_job"},
-    )
-    targetScrapePoolTargetsAdded = prometheus.NewGaugeVec(
-        prometheus.GaugeOpts{
-            Name: "prometheus_target_scrape_pool_targets",
-            Help: "Current number of targets in this scrape pool.",
-        },
-        []string{"scrape_job"},
-    )
-    targetSyncIntervalLength = prometheus.NewSummaryVec(
-        prometheus.SummaryOpts{
-            Name:       "prometheus_target_sync_length_seconds",
-            Help:       "Actual interval to sync the scrape pool.",
-            Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
-        },
-        []string{"scrape_job"},
-    )
-    targetScrapePoolSyncsCounter = prometheus.NewCounterVec(
-        prometheus.CounterOpts{
-            Name: "prometheus_target_scrape_pool_sync_total",
-            Help: "Total number of syncs that were executed on a scrape pool.",
-        },
-        []string{"scrape_job"},
-    )
-    targetScrapeExceededBodySizeLimit = prometheus.NewCounter(
-        prometheus.CounterOpts{
-            Name: "prometheus_target_scrapes_exceeded_body_size_limit_total",
-            Help: "Total number of scrapes that hit the body size limit",
-        },
-    )
-    targetScrapeSampleLimit = prometheus.NewCounter(
-        prometheus.CounterOpts{
-            Name: "prometheus_target_scrapes_exceeded_sample_limit_total",
-            Help: "Total number of scrapes that hit the sample limit and were rejected.",
-        },
-    )
-    targetScrapeSampleDuplicate = prometheus.NewCounter(
-        prometheus.CounterOpts{
-            Name: "prometheus_target_scrapes_sample_duplicate_timestamp_total",
-            Help: "Total number of samples rejected due to duplicate timestamps but different values.",
-        },
-    )
-    targetScrapeSampleOutOfOrder = prometheus.NewCounter(
-        prometheus.CounterOpts{
-            Name: "prometheus_target_scrapes_sample_out_of_order_total",
-            Help: "Total number of samples rejected due to not being out of the expected order.",
-        },
-    )
-    targetScrapeSampleOutOfBounds = prometheus.NewCounter(
-        prometheus.CounterOpts{
-            Name: "prometheus_target_scrapes_sample_out_of_bounds_total",
-            Help: "Total number of samples rejected due to timestamp falling outside of the time bounds.",
-        },
-    )
-    targetScrapeCacheFlushForced = prometheus.NewCounter(
-        prometheus.CounterOpts{
-            Name: "prometheus_target_scrapes_cache_flush_forced_total",
-            Help: "How many times a scrape cache was flushed due to getting big while scrapes are failing.",
-        },
-    )
-    targetScrapeExemplarOutOfOrder = prometheus.NewCounter(
-        prometheus.CounterOpts{
-            Name: "prometheus_target_scrapes_exemplar_out_of_order_total",
-            Help: "Total number of exemplar rejected due to not being out of the expected order.",
-        },
-    )
-    targetScrapePoolExceededLabelLimits = prometheus.NewCounter(
-        prometheus.CounterOpts{
-            Name: "prometheus_target_scrape_pool_exceeded_label_limits_total",
-            Help: "Total number of times scrape pools hit the label limits, during sync or config reload.",
-        },
-    )
-    targetSyncFailed = prometheus.NewCounterVec(
-        prometheus.CounterOpts{
-            Name: "prometheus_target_sync_failed_total",
-            Help: "Total number of target sync failures.",
-        },
-        []string{"scrape_job"},
-    )
-    targetScrapeNativeHistogramBucketLimit = prometheus.NewCounter(
-        prometheus.CounterOpts{
-            Name: "prometheus_target_scrapes_exceeded_native_histogram_bucket_limit_total",
-            Help: "Total number of scrapes that hit the native histogram bucket limit and were rejected.",
-        },
-    )
-)
-
-func init() {
-    prometheus.MustRegister(
-        targetIntervalLength,
-        targetReloadIntervalLength,
-        targetScrapePools,
-        targetScrapePoolsFailed,
-        targetScrapePoolReloads,
-        targetScrapePoolReloadsFailed,
-        targetSyncIntervalLength,
-        targetScrapePoolSyncsCounter,
-        targetScrapeExceededBodySizeLimit,
-        targetScrapeSampleLimit,
-        targetScrapeSampleDuplicate,
-        targetScrapeSampleOutOfOrder,
-        targetScrapeSampleOutOfBounds,
-        targetScrapePoolExceededTargetLimit,
-        targetScrapePoolTargetLimit,
-        targetScrapePoolTargetsAdded,
-        targetScrapeCacheFlushForced,
-        targetMetadataCache,
-        targetScrapeExemplarOutOfOrder,
-        targetScrapePoolExceededLabelLimits,
-        targetSyncFailed,
-        targetScrapeNativeHistogramBucketLimit,
-    )
-}

// scrapePool manages scrapes for sets of targets.
type scrapePool struct {
    appendable storage.Appendable
@@ -251,7 +85,7 @@ type scrapePool struct {

    noDefaultPort bool

-    enableProtobufNegotiation bool
+    metrics *scrapeMetrics
}

type labelLimits struct {
@@ -280,15 +114,13 @@ const maxAheadTime = 10 * time.Minute
// returning an empty label set is interpreted as "drop"
type labelsMutator func(labels.Labels) labels.Labels

-func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger log.Logger, options *Options) (*scrapePool, error) {
-    targetScrapePools.Inc()
+func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger log.Logger, options *Options, metrics *scrapeMetrics) (*scrapePool, error) {
    if logger == nil {
        logger = log.NewNopLogger()
    }

    client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, options.HTTPClientOptions...)
    if err != nil {
-        targetScrapePoolsFailed.Inc()
        return nil, errors.Wrap(err, "error creating HTTP client")
    }

@@ -296,22 +128,22 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
    ctx, cancel := context.WithCancel(context.Background())
    sp := &scrapePool{
-        cancel:                    cancel,
-        appendable:                app,
-        config:                    cfg,
-        client:                    client,
-        activeTargets:             map[uint64]*Target{},
-        loops:                     map[uint64]loop{},
-        logger:                    logger,
-        httpOpts:                  options.HTTPClientOptions,
-        noDefaultPort:             options.NoDefaultPort,
-        enableProtobufNegotiation: options.EnableProtobufNegotiation,
+        cancel:        cancel,
+        appendable:    app,
+        config:        cfg,
+        client:        client,
+        activeTargets: map[uint64]*Target{},
+        loops:         map[uint64]loop{},
+        logger:        logger,
+        metrics:       metrics,
+        httpOpts:      options.HTTPClientOptions,
+        noDefaultPort: options.NoDefaultPort,
    }
    sp.newLoop = func(opts scrapeLoopOptions) loop {
        // Update the targets retrieval function for metadata to a new scrape cache.
        cache := opts.cache
        if cache == nil {
-            cache = newScrapeCache()
+            cache = newScrapeCache(metrics)
        }
        opts.target.SetMetadataStore(cache)

@@ -338,9 +170,10 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
            options.EnableMetadataStorage,
            opts.target,
            options.PassMetadataInContext,
+            metrics,
        )
    }
-    targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit))
+    sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit))
    return sp, nil
}

@@ -395,11 +228,11 @@ func (sp *scrapePool) stop() {
    sp.client.CloseIdleConnections()

    if sp.config != nil {
-        targetScrapePoolSyncsCounter.DeleteLabelValues(sp.config.JobName)
-        targetScrapePoolTargetLimit.DeleteLabelValues(sp.config.JobName)
-        targetScrapePoolTargetsAdded.DeleteLabelValues(sp.config.JobName)
-        targetSyncIntervalLength.DeleteLabelValues(sp.config.JobName)
-        targetSyncFailed.DeleteLabelValues(sp.config.JobName)
+        sp.metrics.targetScrapePoolSyncsCounter.DeleteLabelValues(sp.config.JobName)
+        sp.metrics.targetScrapePoolTargetLimit.DeleteLabelValues(sp.config.JobName)
+        sp.metrics.targetScrapePoolTargetsAdded.DeleteLabelValues(sp.config.JobName)
+        sp.metrics.targetSyncIntervalLength.DeleteLabelValues(sp.config.JobName)
+        sp.metrics.targetSyncFailed.DeleteLabelValues(sp.config.JobName)
    }
}

@@ -409,12 +242,12 @@ func (sp *scrapePool) stop() {
func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
    sp.mtx.Lock()
    defer sp.mtx.Unlock()
-    targetScrapePoolReloads.Inc()
+    sp.metrics.targetScrapePoolReloads.Inc()
    start := time.Now()

    client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, sp.httpOpts...)
    if err != nil {
-        targetScrapePoolReloadsFailed.Inc()
+        sp.metrics.targetScrapePoolReloadsFailed.Inc()
        return errors.Wrap(err, "error creating HTTP client")
    }

@@ -423,7 +256,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
    oldClient := sp.client
    sp.client = client

-    targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit))
+    sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit))

    var (
        wg sync.WaitGroup
@@ -451,17 +284,19 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
            oldLoop.disableEndOfRunStalenessMarkers()
            cache = oc
        } else {
-            cache = newScrapeCache()
+            cache = newScrapeCache(sp.metrics)
        }

        t := sp.activeTargets[fp]
        interval, timeout, err := t.intervalAndTimeout(interval, timeout)
-        acceptHeader := scrapeAcceptHeader
-        if sp.enableProtobufNegotiation {
-            acceptHeader = scrapeAcceptHeaderWithProtobuf
-        }
        var (
-            s = &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit, acceptHeader: acceptHeader}
+            s = &targetScraper{
+                Target:        t,
+                client:        sp.client,
+                timeout:       timeout,
+                bodySizeLimit: bodySizeLimit,
+                acceptHeader:  acceptHeader(cfg.ScrapeProtocols),
+            }
            newLoop = sp.newLoop(scrapeLoopOptions{
                target:  t,
                scraper: s,
@@ -496,7 +331,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {

    wg.Wait()
    oldClient.CloseIdleConnections()
-    targetReloadIntervalLength.WithLabelValues(interval.String()).Observe(
+    sp.metrics.targetReloadIntervalLength.WithLabelValues(interval.String()).Observe(
        time.Since(start).Seconds(),
    )
    return nil
@ -520,7 +355,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
|
|||
for _, err := range failures {
|
||||
level.Error(sp.logger).Log("msg", "Creating target failed", "err", err)
|
||||
}
|
||||
targetSyncFailed.WithLabelValues(sp.config.JobName).Add(float64(len(failures)))
|
||||
sp.metrics.targetSyncFailed.WithLabelValues(sp.config.JobName).Add(float64(len(failures)))
|
||||
for _, t := range targets {
|
||||
// Replicate .Labels().IsEmpty() with a loop here to avoid generating garbage.
|
||||
nonEmpty := false
|
||||
|
@ -539,10 +374,10 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
|
|||
sp.targetMtx.Unlock()
|
||||
sp.sync(all)
|
||||
|
||||
targetSyncIntervalLength.WithLabelValues(sp.config.JobName).Observe(
|
||||
sp.metrics.targetSyncIntervalLength.WithLabelValues(sp.config.JobName).Observe(
|
||||
time.Since(start).Seconds(),
|
||||
)
|
||||
targetScrapePoolSyncsCounter.WithLabelValues(sp.config.JobName).Inc()
|
||||
sp.metrics.targetScrapePoolSyncsCounter.WithLabelValues(sp.config.JobName).Inc()
|
||||
}
|
||||
|
||||
// sync takes a list of potentially duplicated targets, deduplicates them, starts
|
||||
|
@ -577,11 +412,14 @@ func (sp *scrapePool) sync(targets []*Target) {
|
|||
// for every target.
|
||||
var err error
|
||||
interval, timeout, err = t.intervalAndTimeout(interval, timeout)
|
||||
acceptHeader := scrapeAcceptHeader
|
||||
if sp.enableProtobufNegotiation {
|
||||
acceptHeader = scrapeAcceptHeaderWithProtobuf
|
||||
s := &targetScraper{
|
||||
Target: t,
|
||||
client: sp.client,
|
||||
timeout: timeout,
|
||||
bodySizeLimit: bodySizeLimit,
|
||||
acceptHeader: acceptHeader(sp.config.ScrapeProtocols),
|
||||
metrics: sp.metrics,
|
||||
}
|
||||
s := &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit, acceptHeader: acceptHeader}
|
||||
l := sp.newLoop(scrapeLoopOptions{
|
||||
target: t,
|
||||
scraper: s,
|
||||
|
@ -632,7 +470,7 @@ func (sp *scrapePool) sync(targets []*Target) {
|
|||
|
||||
sp.targetMtx.Unlock()
|
||||
|
||||
targetScrapePoolTargetsAdded.WithLabelValues(sp.config.JobName).Set(float64(len(uniqueLoops)))
|
||||
sp.metrics.targetScrapePoolTargetsAdded.WithLabelValues(sp.config.JobName).Set(float64(len(uniqueLoops)))
|
||||
forcedErr := sp.refreshTargetLimitErr()
|
||||
for _, l := range sp.loops {
|
||||
l.setForcedError(forcedErr)
|
||||
|
@ -656,7 +494,7 @@ func (sp *scrapePool) refreshTargetLimitErr() error {
|
|||
return nil
|
||||
}
|
||||
if l := len(sp.activeTargets); l > int(sp.config.TargetLimit) {
|
||||
targetScrapePoolExceededTargetLimit.Inc()
|
||||
sp.metrics.targetScrapePoolExceededTargetLimit.Inc()
|
||||
return fmt.Errorf("target_limit exceeded (number of targets: %d, limit: %d)", l, sp.config.TargetLimit)
|
||||
}
|
||||
return nil
|
||||
|
@ -785,7 +623,8 @@ func appender(app storage.Appender, sampleLimit, bucketLimit int) storage.Append
|
|||
|
||||
// A scraper retrieves samples and accepts a status report at the end.
|
||||
type scraper interface {
|
||||
scrape(ctx context.Context, w io.Writer) (string, error)
|
||||
scrape(ctx context.Context) (*http.Response, error)
|
||||
readResponse(ctx context.Context, resp *http.Response, w io.Writer) (string, error)
|
||||
Report(start time.Time, dur time.Duration, err error)
|
||||
offset(interval time.Duration, offsetSeed uint64) time.Duration
|
||||
}
|
||||
|
@ -803,22 +642,34 @@ type targetScraper struct {
|
|||
|
||||
bodySizeLimit int64
|
||||
acceptHeader string
|
||||
|
||||
metrics *scrapeMetrics
|
||||
}
|
||||
|
||||
var errBodySizeLimit = errors.New("body size limit exceeded")
|
||||
|
||||
const (
|
||||
scrapeAcceptHeader = `application/openmetrics-text;version=1.0.0,application/openmetrics-text;version=0.0.1;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1`
|
||||
scrapeAcceptHeaderWithProtobuf = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited,application/openmetrics-text;version=1.0.0;q=0.8,application/openmetrics-text;version=0.0.1;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1`
|
||||
)
|
||||
// acceptHeader transforms preference from the options into specific header values as
|
||||
// https://www.rfc-editor.org/rfc/rfc9110.html#name-accept defines.
|
||||
// No validation is done here; we expect scrape protocols to be validated already.
|
||||
func acceptHeader(sps []config.ScrapeProtocol) string {
|
||||
var vals []string
|
||||
weight := len(config.ScrapeProtocolsHeaders) + 1
|
||||
for _, sp := range sps {
|
||||
vals = append(vals, fmt.Sprintf("%s;q=0.%d", config.ScrapeProtocolsHeaders[sp], weight))
|
||||
weight--
|
||||
}
|
||||
// Default match anything.
|
||||
vals = append(vals, fmt.Sprintf("*/*;q=%d", weight))
|
||||
return strings.Join(vals, ",")
|
||||
}
|
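The loop above assigns descending q-weights by list position, so earlier protocols are preferred. A standalone sketch of the same weighting scheme (the header strings below are stand-ins for the config.ScrapeProtocolsHeaders values):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical stand-ins for config.ScrapeProtocolsHeaders entries.
	headers := []string{
		"application/openmetrics-text;version=1.0.0",
		"text/plain;version=0.0.4",
	}
	weight := len(headers) + 1
	var vals []string
	for _, h := range headers {
		// Earlier entries receive a larger q-value and are preferred.
		vals = append(vals, fmt.Sprintf("%s;q=0.%d", h, weight))
		weight--
	}
	// Trailing wildcard matches anything at the lowest weight.
	vals = append(vals, fmt.Sprintf("*/*;q=0.%d", weight))
	fmt.Println(strings.Join(vals, ","))
	// application/openmetrics-text;version=1.0.0;q=0.3,text/plain;version=0.0.4;q=0.2,*/*;q=0.1
}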
||||
|
||||
var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)
|
||||
|
||||
func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error) {
|
||||
func (s *targetScraper) scrape(ctx context.Context) (*http.Response, error) {
|
||||
if s.req == nil {
|
||||
req, err := http.NewRequest("GET", s.URL().String(), nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Add("Accept", s.acceptHeader)
|
||||
req.Header.Add("Accept-Encoding", "gzip")
|
||||
|
@ -828,10 +679,10 @@ func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error)
|
|||
s.req = req
|
||||
}
|
||||
|
||||
resp, err := s.client.Do(s.req.WithContext(ctx))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return s.client.Do(s.req.WithContext(ctx))
|
||||
}
|
||||
|
||||
func (s *targetScraper) readResponse(ctx context.Context, resp *http.Response, w io.Writer) (string, error) {
|
||||
defer func() {
|
||||
io.Copy(io.Discard, resp.Body)
|
||||
resp.Body.Close()
|
||||
|
@ -850,7 +701,7 @@ func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error)
|
|||
return "", err
|
||||
}
|
||||
if n >= s.bodySizeLimit {
|
||||
targetScrapeExceededBodySizeLimit.Inc()
|
||||
s.metrics.targetScrapeExceededBodySizeLimit.Inc()
|
||||
return "", errBodySizeLimit
|
||||
}
|
||||
return resp.Header.Get("Content-Type"), nil
|
||||
|
@ -858,13 +709,14 @@ func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error)
|
|||
|
||||
if s.gzipr == nil {
|
||||
s.buf = bufio.NewReader(resp.Body)
|
||||
var err error
|
||||
s.gzipr, err = gzip.NewReader(s.buf)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
s.buf.Reset(resp.Body)
|
||||
if err = s.gzipr.Reset(s.buf); err != nil {
|
||||
if err := s.gzipr.Reset(s.buf); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
@ -875,7 +727,7 @@ func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error)
|
|||
return "", err
|
||||
}
|
||||
if n >= s.bodySizeLimit {
|
||||
targetScrapeExceededBodySizeLimit.Inc()
|
||||
s.metrics.targetScrapeExceededBodySizeLimit.Inc()
|
||||
return "", errBodySizeLimit
|
||||
}
|
||||
return resp.Header.Get("Content-Type"), nil
|
||||
|
@ -928,6 +780,8 @@ type scrapeLoop struct {
|
|||
|
||||
reportExtraMetrics bool
|
||||
appendMetadataToWAL bool
|
||||
|
||||
metrics *scrapeMetrics
|
||||
}
|
||||
|
||||
// scrapeCache tracks mappings of exposed metric strings to label sets and
|
||||
|
@ -955,6 +809,8 @@ type scrapeCache struct {
|
|||
|
||||
metaMtx sync.Mutex
|
||||
metadata map[string]*metaEntry
|
||||
|
||||
metrics *scrapeMetrics
|
||||
}
|
||||
|
||||
// metaEntry holds meta information about a metric.
|
||||
|
@ -970,13 +826,14 @@ func (m *metaEntry) size() int {
|
|||
return len(m.Help) + len(m.Unit) + len(m.Type)
|
||||
}
|
||||
|
||||
func newScrapeCache() *scrapeCache {
|
||||
func newScrapeCache(metrics *scrapeMetrics) *scrapeCache {
|
||||
return &scrapeCache{
|
||||
series: map[string]*cacheEntry{},
|
||||
droppedSeries: map[string]*uint64{},
|
||||
seriesCur: map[uint64]labels.Labels{},
|
||||
seriesPrev: map[uint64]labels.Labels{},
|
||||
metadata: map[string]*metaEntry{},
|
||||
metrics: metrics,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -995,7 +852,7 @@ func (c *scrapeCache) iterDone(flushCache bool) {
|
|||
// since the last scrape, and allow an additional 1000 in case
|
||||
// initial scrapes all fail.
|
||||
flushCache = true
|
||||
targetScrapeCacheFlushForced.Inc()
|
||||
c.metrics.targetScrapeCacheFlushForced.Inc()
|
||||
}
|
||||
|
||||
if flushCache {
|
||||
|
@ -1199,6 +1056,7 @@ func newScrapeLoop(ctx context.Context,
|
|||
appendMetadataToWAL bool,
|
||||
target *Target,
|
||||
passMetadataInContext bool,
|
||||
metrics *scrapeMetrics,
|
||||
) *scrapeLoop {
|
||||
if l == nil {
|
||||
l = log.NewNopLogger()
|
||||
|
@ -1207,7 +1065,7 @@ func newScrapeLoop(ctx context.Context,
|
|||
buffers = pool.New(1e3, 1e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) })
|
||||
}
|
||||
if cache == nil {
|
||||
cache = newScrapeCache()
|
||||
cache = newScrapeCache(metrics)
|
||||
}
|
||||
|
||||
appenderCtx := ctx
|
||||
|
@ -1242,6 +1100,7 @@ func newScrapeLoop(ctx context.Context,
|
|||
scrapeClassicHistograms: scrapeClassicHistograms,
|
||||
reportExtraMetrics: reportExtraMetrics,
|
||||
appendMetadataToWAL: appendMetadataToWAL,
|
||||
metrics: metrics,
|
||||
}
|
||||
sl.ctx, sl.cancel = context.WithCancel(ctx)
|
||||
|
||||
|
@ -1321,16 +1180,12 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er
|
|||
|
||||
// Only record after the first scrape.
|
||||
if !last.IsZero() {
|
||||
targetIntervalLength.WithLabelValues(sl.interval.String()).Observe(
|
||||
sl.metrics.targetIntervalLength.WithLabelValues(sl.interval.String()).Observe(
|
||||
time.Since(last).Seconds(),
|
||||
)
|
||||
}
|
||||
|
||||
b := sl.buffers.Get(sl.lastScrapeSize).([]byte)
|
||||
defer sl.buffers.Put(b)
|
||||
buf := bytes.NewBuffer(b)
|
||||
|
||||
var total, added, seriesAdded, bytes int
|
||||
var total, added, seriesAdded, bytesRead int
|
||||
var err, appErr, scrapeErr error
|
||||
|
||||
app := sl.appender(sl.appenderCtx)
|
||||
|
@ -1346,7 +1201,7 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er
|
|||
}()
|
||||
|
||||
defer func() {
|
||||
if err = sl.report(app, appendTime, time.Since(start), total, added, seriesAdded, bytes, scrapeErr); err != nil {
|
||||
if err = sl.report(app, appendTime, time.Since(start), total, added, seriesAdded, bytesRead, scrapeErr); err != nil {
|
||||
level.Warn(sl.l).Log("msg", "Appending scrape report failed", "err", err)
|
||||
}
|
||||
}()
|
||||
|
@ -1367,8 +1222,17 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er
|
|||
}
|
||||
|
||||
var contentType string
|
||||
var resp *http.Response
|
||||
var b []byte
|
||||
var buf *bytes.Buffer
|
||||
scrapeCtx, cancel := context.WithTimeout(sl.parentCtx, sl.timeout)
|
||||
contentType, scrapeErr = sl.scraper.scrape(scrapeCtx, buf)
|
||||
resp, scrapeErr = sl.scraper.scrape(scrapeCtx)
|
||||
if scrapeErr == nil {
|
||||
b = sl.buffers.Get(sl.lastScrapeSize).([]byte)
|
||||
defer sl.buffers.Put(b)
|
||||
buf = bytes.NewBuffer(b)
|
||||
contentType, scrapeErr = sl.scraper.readResponse(scrapeCtx, resp, buf)
|
||||
}
|
||||
cancel()
|
||||
|
||||
if scrapeErr == nil {
|
||||
|
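The reworked flow splits the old single scrape call in two: the HTTP request happens first, and the read buffer is only taken from the pool once the request has succeeded. A rough standalone sketch of that ordering, using net/http directly rather than the targetScraper type:

package main

import (
	"bytes"
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "metric_a 1\n")
	}))
	defer srv.Close()

	// Step 1: issue the request; no buffer has been allocated yet,
	// so a failed scrape costs nothing from the buffer pool.
	resp, err := http.Get(srv.URL)
	if err != nil {
		return
	}
	defer resp.Body.Close()

	// Step 2: only after success, take a buffer and read the body.
	var buf bytes.Buffer
	if _, err := buf.ReadFrom(resp.Body); err == nil {
		fmt.Print(buf.String())
	}
}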
@ -1379,14 +1243,14 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er
|
|||
if len(b) > 0 {
|
||||
sl.lastScrapeSize = len(b)
|
||||
}
|
||||
bytes = len(b)
|
||||
bytesRead = len(b)
|
||||
} else {
|
||||
level.Debug(sl.l).Log("msg", "Scrape failed", "err", scrapeErr)
|
||||
if errc != nil {
|
||||
errc <- scrapeErr
|
||||
}
|
||||
if errors.Is(scrapeErr, errBodySizeLimit) {
|
||||
bytes = -1
|
||||
bytesRead = -1
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1525,6 +1389,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
|
|||
appErrs = appendErrors{}
|
||||
sampleLimitErr error
|
||||
bucketLimitErr error
|
||||
lset labels.Labels // escapes to heap so hoisted out of loop
|
||||
e exemplar.Exemplar // escapes to heap so hoisted out of loop
|
||||
meta metadata.Metadata
|
||||
metadataChanged bool
|
||||
|
@ -1622,7 +1487,6 @@ loop:
|
|||
ce, ok := sl.cache.get(met)
|
||||
var (
|
||||
ref storage.SeriesRef
|
||||
lset labels.Labels
|
||||
hash uint64
|
||||
)
|
||||
|
||||
|
@ -1657,7 +1521,7 @@ loop:
|
|||
|
||||
// If any label limit is exceeded, the scrape should fail.
|
||||
if err = verifyLabelLimits(lset, sl.labelLimits); err != nil {
|
||||
targetScrapePoolExceededLabelLimits.Inc()
|
||||
sl.metrics.targetScrapePoolExceededLabelLimits.Inc()
|
||||
break loop
|
||||
}
|
||||
|
||||
|
@ -1722,14 +1586,14 @@ loop:
|
|||
err = sampleLimitErr
|
||||
}
|
||||
// We only want to increment this once per scrape, so this is Inc'd outside the loop.
|
||||
targetScrapeSampleLimit.Inc()
|
||||
sl.metrics.targetScrapeSampleLimit.Inc()
|
||||
}
|
||||
if bucketLimitErr != nil {
|
||||
if err == nil {
|
||||
err = bucketLimitErr // If sample limit is hit, that error takes precedence.
|
||||
}
|
||||
// We only want to increment this once per scrape, so this is Inc'd outside the loop.
|
||||
targetScrapeNativeHistogramBucketLimit.Inc()
|
||||
sl.metrics.targetScrapeNativeHistogramBucketLimit.Inc()
|
||||
}
|
||||
if appErrs.numOutOfOrder > 0 {
|
||||
level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order samples", "num_dropped", appErrs.numOutOfOrder)
|
||||
|
@ -1773,17 +1637,17 @@ func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err e
|
|||
case storage.ErrOutOfOrderSample:
|
||||
appErrs.numOutOfOrder++
|
||||
level.Debug(sl.l).Log("msg", "Out of order sample", "series", string(met))
|
||||
targetScrapeSampleOutOfOrder.Inc()
|
||||
sl.metrics.targetScrapeSampleOutOfOrder.Inc()
|
||||
return false, nil
|
||||
case storage.ErrDuplicateSampleForTimestamp:
|
||||
appErrs.numDuplicates++
|
||||
level.Debug(sl.l).Log("msg", "Duplicate sample for timestamp", "series", string(met))
|
||||
targetScrapeSampleDuplicate.Inc()
|
||||
sl.metrics.targetScrapeSampleDuplicate.Inc()
|
||||
return false, nil
|
||||
case storage.ErrOutOfBounds:
|
||||
appErrs.numOutOfBounds++
|
||||
level.Debug(sl.l).Log("msg", "Out of bounds metric", "series", string(met))
|
||||
targetScrapeSampleOutOfBounds.Inc()
|
||||
sl.metrics.targetScrapeSampleOutOfBounds.Inc()
|
||||
return false, nil
|
||||
case errSampleLimit:
|
||||
// Keep on parsing output if we hit the limit, so we report the correct
|
||||
|
@ -1807,7 +1671,7 @@ func (sl *scrapeLoop) checkAddExemplarError(err error, e exemplar.Exemplar, appE
|
|||
case storage.ErrOutOfOrderExemplar:
|
||||
appErrs.numExemplarOutOfOrder++
|
||||
level.Debug(sl.l).Log("msg", "Out of order exemplar", "exemplar", fmt.Sprintf("%+v", e))
|
||||
targetScrapeExemplarOutOfOrder.Inc()
|
||||
sl.metrics.targetScrapeExemplarOutOfOrder.Inc()
|
||||
return nil
|
||||
default:
|
||||
return err
|
||||
|
|
|
@ -57,11 +57,18 @@ func TestMain(m *testing.M) {
|
|||
testutil.TolerantVerifyLeak(m)
|
||||
}
|
||||
|
||||
func newTestScrapeMetrics(t testing.TB) *scrapeMetrics {
|
||||
reg := prometheus.NewRegistry()
|
||||
metrics, err := newScrapeMetrics(reg)
|
||||
require.NoError(t, err)
|
||||
return metrics
|
||||
}
|
||||
|
||||
func TestNewScrapePool(t *testing.T) {
|
||||
var (
|
||||
app = &nopAppendable{}
|
||||
cfg = &config.ScrapeConfig{}
|
||||
sp, _ = newScrapePool(cfg, app, 0, nil, &Options{})
|
||||
sp, _ = newScrapePool(cfg, app, 0, nil, &Options{}, newTestScrapeMetrics(t))
|
||||
)
|
||||
|
||||
if a, ok := sp.appendable.(*nopAppendable); !ok || a != app {
|
||||
|
@ -97,7 +104,7 @@ func TestDroppedTargetsList(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
sp, _ = newScrapePool(cfg, app, 0, nil, &Options{})
|
||||
sp, _ = newScrapePool(cfg, app, 0, nil, &Options{}, newTestScrapeMetrics(t))
|
||||
expectedLabelSetString = "{__address__=\"127.0.0.1:9090\", __scrape_interval__=\"0s\", __scrape_timeout__=\"0s\", job=\"dropMe\"}"
|
||||
expectedLength = 2
|
||||
)
|
||||
|
@ -117,7 +124,10 @@ func TestDroppedTargetsList(t *testing.T) {
|
|||
// TestDiscoveredLabelsUpdate checks that DiscoveredLabels are updated
|
||||
// even when new labels don't affect the target `hash`.
|
||||
func TestDiscoveredLabelsUpdate(t *testing.T) {
|
||||
sp := &scrapePool{}
|
||||
sp := &scrapePool{
|
||||
metrics: newTestScrapeMetrics(t),
|
||||
}
|
||||
|
||||
// These are used when syncing so need this to avoid a panic.
|
||||
sp.config = &config.ScrapeConfig{
|
||||
ScrapeInterval: model.Duration(1),
|
||||
|
@ -184,6 +194,7 @@ func TestScrapePoolStop(t *testing.T) {
|
|||
loops: map[uint64]loop{},
|
||||
cancel: func() {},
|
||||
client: http.DefaultClient,
|
||||
metrics: newTestScrapeMetrics(t),
|
||||
}
|
||||
var mtx sync.Mutex
|
||||
stopped := map[uint64]bool{}
|
||||
|
@ -262,6 +273,7 @@ func TestScrapePoolReload(t *testing.T) {
|
|||
}
|
||||
return l
|
||||
}
|
||||
|
||||
sp := &scrapePool{
|
||||
appendable: &nopAppendable{},
|
||||
activeTargets: map[uint64]*Target{},
|
||||
|
@ -269,6 +281,7 @@ func TestScrapePoolReload(t *testing.T) {
|
|||
newLoop: newLoop,
|
||||
logger: nil,
|
||||
client: http.DefaultClient,
|
||||
metrics: newTestScrapeMetrics(t),
|
||||
}
|
||||
|
||||
// Reloading a scrape pool with a new scrape configuration must stop all scrape
|
||||
|
@ -352,6 +365,7 @@ func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) {
|
|||
newLoop: newLoop,
|
||||
logger: nil,
|
||||
client: http.DefaultClient,
|
||||
metrics: newTestScrapeMetrics(t),
|
||||
}
|
||||
|
||||
err := sp.reload(reloadCfg)
|
||||
|
@ -381,6 +395,7 @@ func TestScrapePoolTargetLimit(t *testing.T) {
|
|||
newLoop: newLoop,
|
||||
logger: log.NewNopLogger(),
|
||||
client: http.DefaultClient,
|
||||
metrics: newTestScrapeMetrics(t),
|
||||
}
|
||||
|
||||
tgs := []*targetgroup.Group{}
|
||||
|
@ -489,7 +504,7 @@ func TestScrapePoolTargetLimit(t *testing.T) {
|
|||
func TestScrapePoolAppender(t *testing.T) {
|
||||
cfg := &config.ScrapeConfig{}
|
||||
app := &nopAppendable{}
|
||||
sp, _ := newScrapePool(cfg, app, 0, nil, &Options{})
|
||||
sp, _ := newScrapePool(cfg, app, 0, nil, &Options{}, newTestScrapeMetrics(t))
|
||||
|
||||
loop := sp.newLoop(scrapeLoopOptions{
|
||||
target: &Target{},
|
||||
|
@ -545,7 +560,7 @@ func TestScrapePoolRaces(t *testing.T) {
|
|||
newConfig := func() *config.ScrapeConfig {
|
||||
return &config.ScrapeConfig{ScrapeInterval: interval, ScrapeTimeout: timeout}
|
||||
}
|
||||
sp, _ := newScrapePool(newConfig(), &nopAppendable{}, 0, nil, &Options{})
|
||||
sp, _ := newScrapePool(newConfig(), &nopAppendable{}, 0, nil, &Options{}, newTestScrapeMetrics(t))
|
||||
tgts := []*targetgroup.Group{
|
||||
{
|
||||
Targets: []model.LabelSet{
|
||||
|
@ -595,6 +610,7 @@ func TestScrapePoolScrapeLoopsStarted(t *testing.T) {
|
|||
newLoop: newLoop,
|
||||
logger: nil,
|
||||
client: http.DefaultClient,
|
||||
metrics: newTestScrapeMetrics(t),
|
||||
}
|
||||
|
||||
tgs := []*targetgroup.Group{
|
||||
|
@ -643,6 +659,7 @@ func TestScrapeLoopStopBeforeRun(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
// The scrape pool synchronizes on stopping scrape loops. However, new scrape
|
||||
|
@ -716,6 +733,7 @@ func TestScrapeLoopStop(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
// Terminate loop after 2 scrapes.
|
||||
|
@ -793,6 +811,7 @@ func TestScrapeLoopRun(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
// The loop must terminate during the initial offset if the context
|
||||
|
@ -849,6 +868,7 @@ func TestScrapeLoopRun(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
go func() {
|
||||
|
@ -909,6 +929,7 @@ func TestScrapeLoopForcedErr(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
forcedErr := fmt.Errorf("forced err")
|
||||
|
@ -945,7 +966,7 @@ func TestScrapeLoopMetadata(t *testing.T) {
|
|||
var (
|
||||
signal = make(chan struct{})
|
||||
scraper = &testScraper{}
|
||||
cache = newScrapeCache()
|
||||
cache = newScrapeCache(newTestScrapeMetrics(t))
|
||||
)
|
||||
defer close(signal)
|
||||
|
||||
|
@ -968,6 +989,7 @@ func TestScrapeLoopMetadata(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
defer cancel()
|
||||
|
||||
|
@ -1026,6 +1048,7 @@ func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
t.Cleanup(func() { cancel() })
|
||||
|
||||
|
@ -1087,6 +1110,7 @@ func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
slApp := sl.appender(ctx)
|
||||
|
@ -1166,6 +1190,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
// Succeed once, several failures, then stop.
|
||||
numScrapes := 0
|
||||
|
@ -1230,6 +1255,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
// Succeed once, several failures, then stop.
|
||||
|
@ -1297,6 +1323,7 @@ func TestScrapeLoopCache(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
numScrapes := 0
|
||||
|
@ -1381,6 +1408,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
numScrapes := 0
|
||||
|
@ -1496,6 +1524,7 @@ func TestScrapeLoopAppend(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
now := time.Now()
|
||||
|
@ -1583,7 +1612,8 @@ func TestScrapeLoopAppendForConflictingPrefixedLabels(t *testing.T) {
|
|||
return mutateSampleLabels(l, &Target{labels: labels.FromStrings(tc.targetLabels...)}, false, nil)
|
||||
},
|
||||
nil,
|
||||
func(ctx context.Context) storage.Appender { return app }, nil, 0, true, 0, 0, nil, 0, 0, false, false, false, nil, false,
|
||||
func(ctx context.Context) storage.Appender { return app },
|
||||
nil, 0, true, 0, 0, nil, 0, 0, false, false, false, nil, false, newTestScrapeMetrics(t),
|
||||
)
|
||||
slApp := sl.appender(context.Background())
|
||||
_, _, _, err := sl.append(slApp, []byte(tc.exposedLabels), "", time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC))
|
||||
|
@ -1623,6 +1653,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
fakeRef := storage.SeriesRef(1)
|
||||
|
@ -1682,11 +1713,12 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
// Get the value of the Counter before performing the append.
|
||||
beforeMetric := dto.Metric{}
|
||||
err := targetScrapeSampleLimit.Write(&beforeMetric)
|
||||
err := sl.metrics.targetScrapeSampleLimit.Write(&beforeMetric)
|
||||
require.NoError(t, err)
|
||||
|
||||
beforeMetricValue := beforeMetric.GetCounter().GetValue()
|
||||
|
@ -1705,7 +1737,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
|
|||
// Check that the Counter has been incremented a single time for the scrape,
|
||||
// not multiple times for each sample.
|
||||
metric := dto.Metric{}
|
||||
err = targetScrapeSampleLimit.Write(&metric)
|
||||
err = sl.metrics.targetScrapeSampleLimit.Write(&metric)
|
||||
require.NoError(t, err)
|
||||
|
||||
value := metric.GetCounter().GetValue()
|
||||
|
@ -1760,10 +1792,11 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
metric := dto.Metric{}
|
||||
err := targetScrapeNativeHistogramBucketLimit.Write(&metric)
|
||||
err := sl.metrics.targetScrapeNativeHistogramBucketLimit.Write(&metric)
|
||||
require.NoError(t, err)
|
||||
beforeMetricValue := metric.GetCounter().GetValue()
|
||||
|
||||
|
@ -1801,7 +1834,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
|
|||
require.Equal(t, 3, added)
|
||||
require.Equal(t, 3, seriesAdded)
|
||||
|
||||
err = targetScrapeNativeHistogramBucketLimit.Write(&metric)
|
||||
err = sl.metrics.targetScrapeNativeHistogramBucketLimit.Write(&metric)
|
||||
require.NoError(t, err)
|
||||
metricValue := metric.GetCounter().GetValue()
|
||||
require.Equal(t, beforeMetricValue, metricValue)
|
||||
|
@ -1827,7 +1860,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) {
|
|||
require.Equal(t, 3, added)
|
||||
require.Equal(t, 0, seriesAdded)
|
||||
|
||||
err = targetScrapeNativeHistogramBucketLimit.Write(&metric)
|
||||
err = sl.metrics.targetScrapeNativeHistogramBucketLimit.Write(&metric)
|
||||
require.NoError(t, err)
|
||||
metricValue = metric.GetCounter().GetValue()
|
||||
require.Equal(t, beforeMetricValue+1, metricValue)
|
||||
|
@ -1859,6 +1892,7 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
now := time.Now()
|
||||
|
@ -1908,6 +1942,7 @@ func TestScrapeLoopAppendStaleness(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
now := time.Now()
|
||||
|
@ -1960,6 +1995,7 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
now := time.Now()
|
||||
|
@ -2286,6 +2322,7 @@ metric: <
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
now := time.Now()
|
||||
|
@ -2374,6 +2411,7 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
now := time.Now()
|
||||
|
@ -2427,6 +2465,7 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
|
||||
|
@ -2464,6 +2503,7 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
|
||||
|
@ -2514,6 +2554,7 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
now := time.Unix(1, 0)
|
||||
|
@ -2560,6 +2601,7 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
now := time.Now().Add(20 * time.Minute)
|
||||
|
@ -2619,15 +2661,17 @@ func TestTargetScraperScrapeOK(t *testing.T) {
|
|||
}
|
||||
var buf bytes.Buffer
|
||||
|
||||
contentType, err := ts.scrape(context.Background(), &buf)
|
||||
resp, err := ts.scrape(context.Background())
|
||||
require.NoError(t, err)
|
||||
contentType, err := ts.readResponse(context.Background(), resp, &buf)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "text/plain; version=0.0.4", contentType)
|
||||
require.Equal(t, "metric_a 1\nmetric_b 2\n", buf.String())
|
||||
}
|
||||
|
||||
runTest(scrapeAcceptHeader)
|
||||
runTest(acceptHeader(config.DefaultScrapeProtocols))
|
||||
protobufParsing = true
|
||||
runTest(scrapeAcceptHeaderWithProtobuf)
|
||||
runTest(acceptHeader(config.DefaultNativeHistogramScrapeProtocols))
|
||||
}
|
||||
|
||||
func TestTargetScrapeScrapeCancel(t *testing.T) {
|
||||
|
@ -2653,7 +2697,7 @@ func TestTargetScrapeScrapeCancel(t *testing.T) {
|
|||
),
|
||||
},
|
||||
client: http.DefaultClient,
|
||||
acceptHeader: scrapeAcceptHeader,
|
||||
acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols),
|
||||
}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
|
@ -2665,7 +2709,7 @@ func TestTargetScrapeScrapeCancel(t *testing.T) {
|
|||
}()
|
||||
|
||||
go func() {
|
||||
_, err := ts.scrape(ctx, io.Discard)
|
||||
_, err := ts.scrape(ctx)
|
||||
switch {
|
||||
case err == nil:
|
||||
errc <- errors.New("Expected error but got nil")
|
||||
|
@ -2708,10 +2752,12 @@ func TestTargetScrapeScrapeNotFound(t *testing.T) {
|
|||
),
|
||||
},
|
||||
client: http.DefaultClient,
|
||||
acceptHeader: scrapeAcceptHeader,
|
||||
acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols),
|
||||
}
|
||||
|
||||
_, err = ts.scrape(context.Background(), io.Discard)
|
||||
resp, err := ts.scrape(context.Background())
|
||||
require.NoError(t, err)
|
||||
_, err = ts.readResponse(context.Background(), resp, io.Discard)
|
||||
require.Contains(t, err.Error(), "404", "Expected \"404 NotFound\" error but got: %s", err)
|
||||
}
|
||||
|
||||
|
@ -2750,31 +2796,40 @@ func TestTargetScraperBodySizeLimit(t *testing.T) {
|
|||
},
|
||||
client: http.DefaultClient,
|
||||
bodySizeLimit: bodySizeLimit,
|
||||
acceptHeader: scrapeAcceptHeader,
|
||||
acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols),
|
||||
metrics: newTestScrapeMetrics(t),
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
|
||||
// Target response uncompressed body, scrape with body size limit.
|
||||
_, err = ts.scrape(context.Background(), &buf)
|
||||
resp, err := ts.scrape(context.Background())
|
||||
require.NoError(t, err)
|
||||
_, err = ts.readResponse(context.Background(), resp, &buf)
|
||||
require.ErrorIs(t, err, errBodySizeLimit)
|
||||
require.Equal(t, bodySizeLimit, buf.Len())
|
||||
// Target response gzip compressed body, scrape with body size limit.
|
||||
gzipResponse = true
|
||||
buf.Reset()
|
||||
_, err = ts.scrape(context.Background(), &buf)
|
||||
resp, err = ts.scrape(context.Background())
|
||||
require.NoError(t, err)
|
||||
_, err = ts.readResponse(context.Background(), resp, &buf)
|
||||
require.ErrorIs(t, err, errBodySizeLimit)
|
||||
require.Equal(t, bodySizeLimit, buf.Len())
|
||||
// Target response uncompressed body, scrape without body size limit.
|
||||
gzipResponse = false
|
||||
buf.Reset()
|
||||
ts.bodySizeLimit = 0
|
||||
_, err = ts.scrape(context.Background(), &buf)
|
||||
resp, err = ts.scrape(context.Background())
|
||||
require.NoError(t, err)
|
||||
_, err = ts.readResponse(context.Background(), resp, &buf)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(responseBody), buf.Len())
|
||||
// Target response gzip compressed body, scrape without body size limit.
|
||||
gzipResponse = true
|
||||
buf.Reset()
|
||||
_, err = ts.scrape(context.Background(), &buf)
|
||||
resp, err = ts.scrape(context.Background())
|
||||
require.NoError(t, err)
|
||||
_, err = ts.readResponse(context.Background(), resp, &buf)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(responseBody), buf.Len())
|
||||
}
|
||||
|
@ -2802,7 +2857,11 @@ func (ts *testScraper) Report(start time.Time, duration time.Duration, err error
|
|||
ts.lastError = err
|
||||
}
|
||||
|
||||
func (ts *testScraper) scrape(ctx context.Context, w io.Writer) (string, error) {
|
||||
func (ts *testScraper) scrape(ctx context.Context) (*http.Response, error) {
|
||||
return nil, ts.scrapeErr
|
||||
}
|
||||
|
||||
func (ts *testScraper) readResponse(ctx context.Context, resp *http.Response, w io.Writer) (string, error) {
|
||||
if ts.scrapeFunc != nil {
|
||||
return "", ts.scrapeFunc(ctx, w)
|
||||
}
|
||||
|
@ -2833,6 +2892,7 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
now := time.Now()
|
||||
|
@ -2875,6 +2935,7 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
now := time.Now()
|
||||
|
@ -2916,6 +2977,7 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
defer cancel()
|
||||
|
||||
|
@ -2975,6 +3037,7 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
defer cancel()
|
||||
|
||||
|
@ -3067,7 +3130,7 @@ func TestReuseScrapeCache(t *testing.T) {
|
|||
ScrapeInterval: model.Duration(5 * time.Second),
|
||||
MetricsPath: "/metrics",
|
||||
}
|
||||
sp, _ = newScrapePool(cfg, app, 0, nil, &Options{})
|
||||
sp, _ = newScrapePool(cfg, app, 0, nil, &Options{}, newTestScrapeMetrics(t))
|
||||
t1 = &Target{
|
||||
discoveredLabels: labels.FromStrings("labelNew", "nameNew", "labelNew1", "nameNew1", "labelNew2", "nameNew2"),
|
||||
}
|
||||
|
@ -3239,6 +3302,7 @@ func TestScrapeAddFast(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
defer cancel()
|
||||
|
||||
|
@ -3259,7 +3323,7 @@ func TestScrapeAddFast(t *testing.T) {
|
|||
require.NoError(t, slApp.Commit())
|
||||
}
|
||||
|
||||
func TestReuseCacheRace(*testing.T) {
|
||||
func TestReuseCacheRace(t *testing.T) {
|
||||
var (
|
||||
app = &nopAppendable{}
|
||||
cfg = &config.ScrapeConfig{
|
||||
|
@ -3268,7 +3332,7 @@ func TestReuseCacheRace(*testing.T) {
|
|||
ScrapeInterval: model.Duration(5 * time.Second),
|
||||
MetricsPath: "/metrics",
|
||||
}
|
||||
sp, _ = newScrapePool(cfg, app, 0, nil, &Options{})
|
||||
sp, _ = newScrapePool(cfg, app, 0, nil, &Options{}, newTestScrapeMetrics(t))
|
||||
t1 = &Target{
|
||||
discoveredLabels: labels.FromStrings("labelNew", "nameNew"),
|
||||
}
|
||||
|
@ -3293,7 +3357,7 @@ func TestReuseCacheRace(*testing.T) {
|
|||
|
||||
func TestCheckAddError(t *testing.T) {
|
||||
var appErrs appendErrors
|
||||
sl := scrapeLoop{l: log.NewNopLogger()}
|
||||
sl := scrapeLoop{l: log.NewNopLogger(), metrics: newTestScrapeMetrics(t)}
|
||||
sl.checkAddError(nil, nil, nil, storage.ErrOutOfOrderSample, nil, nil, &appErrs)
|
||||
require.Equal(t, 1, appErrs.numOutOfOrder)
|
||||
}
|
||||
|
@ -3326,6 +3390,7 @@ func TestScrapeReportSingleAppender(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
numScrapes := 0
|
||||
|
@ -3396,7 +3461,7 @@ func TestScrapeReportLimit(t *testing.T) {
|
|||
}))
|
||||
defer ts.Close()
|
||||
|
||||
sp, err := newScrapePool(cfg, s, 0, nil, &Options{})
|
||||
sp, err := newScrapePool(cfg, s, 0, nil, &Options{}, newTestScrapeMetrics(t))
|
||||
require.NoError(t, err)
|
||||
defer sp.stop()
|
||||
|
||||
|
@ -3529,6 +3594,7 @@ func TestScrapeLoopLabelLimit(t *testing.T) {
|
|||
false,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
)
|
||||
|
||||
slApp := sl.appender(context.Background())
|
||||
|
@ -3567,7 +3633,7 @@ func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
sp, _ := newScrapePool(config, &nopAppendable{}, 0, nil, &Options{})
|
||||
sp, _ := newScrapePool(config, &nopAppendable{}, 0, nil, &Options{}, newTestScrapeMetrics(t))
|
||||
tgts := []*targetgroup.Group{
|
||||
{
|
||||
Targets: []model.LabelSet{{model.AddressLabel: "127.0.0.1:9090"}},
|
||||
|
|
|
@ -37,7 +37,7 @@ if [ -z "${GITHUB_TOKEN}" ]; then
|
|||
fi
|
||||
|
||||
# List of files that should be synced.
|
||||
SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint scripts/golangci-lint.yml"
|
||||
SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint scripts/golangci-lint.yml .github/workflows/scorecards.yml"
|
||||
|
||||
# Go to the root of the repo
|
||||
cd "$(git rev-parse --show-cdup)" || exit 1
|
||||
|
|
|
@ -347,7 +347,7 @@ func (c *genericMergeSeriesSet) Next() bool {
|
|||
}
|
||||
|
||||
// Now, pop items of the heap that have equal label sets.
|
||||
c.currentSets = nil
|
||||
c.currentSets = c.currentSets[:0]
|
||||
c.currentLabels = c.heap[0].At().Labels()
|
||||
for len(c.heap) > 0 && labels.Equal(c.currentLabels, c.heap[0].At().Labels()) {
|
||||
set := heap.Pop(&c.heap).(genericSeriesSet)
|
||||
|
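Switching from `c.currentSets = nil` to `c.currentSets = c.currentSets[:0]` keeps the slice's backing array alive across Next calls, so repeated merges stop reallocating. The difference in a standalone sketch:

package main

import "fmt"

func main() {
	s := make([]int, 0, 8)
	s = append(s, 1, 2, 3)

	s = s[:0]           // empties the slice but keeps the backing array
	fmt.Println(cap(s)) // 8: the next appends allocate nothing

	s = nil             // drops the backing array entirely
	fmt.Println(cap(s)) // 0: the next append must reallocate
}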
|
|
@ -29,7 +29,7 @@ import (
|
|||
"github.com/prometheus/common/model"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
|
||||
"go.uber.org/atomic"
|
||||
|
||||
"github.com/prometheus/prometheus/config"
|
||||
|
|
|
@ -30,7 +30,7 @@ import (
|
|||
"go.opentelemetry.io/otel/propagation"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
tracesdk "go.opentelemetry.io/otel/sdk/trace"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"google.golang.org/grpc/credentials"
|
||||
|
||||
|
|
|
@ -802,11 +802,6 @@ func (a *appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v flo
|
|||
series.Lock()
|
||||
defer series.Unlock()
|
||||
|
||||
if t < series.lastTs {
|
||||
a.metrics.totalOutOfOrderSamples.Inc()
|
||||
return 0, storage.ErrOutOfOrderSample
|
||||
}
|
||||
|
||||
// NOTE: always modify pendingSamples and sampleSeries together.
|
||||
a.pendingSamples = append(a.pendingSamples, record.RefSample{
|
||||
Ref: series.ref,
|
||||
|
@ -930,11 +925,6 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int
|
|||
series.Lock()
|
||||
defer series.Unlock()
|
||||
|
||||
if t < series.lastTs {
|
||||
a.metrics.totalOutOfOrderSamples.Inc()
|
||||
return 0, storage.ErrOutOfOrderSample
|
||||
}
|
||||
|
||||
switch {
|
||||
case h != nil:
|
||||
// NOTE: always modify pendingHistograms and histogramSeries together
|
||||
|
|
|
@ -751,3 +751,130 @@ func TestStorage_DuplicateExemplarsIgnored(t *testing.T) {
|
|||
// We had 9 calls to AppendExemplar but only 4 of those should have gotten through.
|
||||
require.Equal(t, 4, walExemplarsCount)
|
||||
}
|
||||
|
||||
func TestDBAllowOOOSamples(t *testing.T) {
|
||||
const (
|
||||
numDatapoints = 5
|
||||
numHistograms = 5
|
||||
numSeries = 4
|
||||
offset = 100
|
||||
)
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
s := createTestAgentDB(t, reg, DefaultOptions())
|
||||
app := s.Appender(context.TODO())
|
||||
|
||||
// Let's add some samples in the [offset, offset+numDatapoints) range.
|
||||
lbls := labelsForTest(t.Name(), numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
for i := offset; i < numDatapoints+offset; i++ {
|
||||
ref, err := app.Append(0, lset, int64(i), float64(i))
|
||||
require.NoError(t, err)
|
||||
|
||||
e := exemplar.Exemplar{
|
||||
Labels: lset,
|
||||
Ts: int64(i) * 2,
|
||||
Value: float64(i),
|
||||
HasTs: true,
|
||||
}
|
||||
_, err = app.AppendExemplar(ref, lset, e)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_histogram", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
|
||||
|
||||
for i := offset; i < numDatapoints+offset; i++ {
|
||||
_, err := app.AppendHistogram(0, lset, int64(i), histograms[i-offset], nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
|
||||
|
||||
for i := offset; i < numDatapoints+offset; i++ {
|
||||
_, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i-offset])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
require.NoError(t, app.Commit())
|
||||
m := gatherFamily(t, reg, "prometheus_agent_samples_appended_total")
|
||||
require.Equal(t, float64(20), m.Metric[0].Counter.GetValue(), "agent wal mismatch of total appended samples")
|
||||
require.Equal(t, float64(40), m.Metric[1].Counter.GetValue(), "agent wal mismatch of total appended histograms")
|
||||
require.NoError(t, s.Close())
|
||||
|
||||
// Hack: s.wal.Dir() is the /wal subdirectory of the original storage path.
|
||||
// We need the original directory so we can recreate the storage for replay.
|
||||
storageDir := filepath.Dir(s.wal.Dir())
|
||||
|
||||
// Replay the storage so that the lastTs for each series is recorded.
|
||||
reg2 := prometheus.NewRegistry()
|
||||
db, err := Open(s.logger, reg2, nil, storageDir, s.opts)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to create storage for the agent: %v", err)
|
||||
}
|
||||
|
||||
app = db.Appender(context.Background())
|
||||
|
||||
// Now the lastTs will have been recorded successfully.
|
||||
// Let's try appending twice as many OOO samples in the [0, numDatapoints) range.
|
||||
lbls = labelsForTest(t.Name()+"_histogram", numSeries*2)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
for i := 0; i < numDatapoints; i++ {
|
||||
ref, err := app.Append(0, lset, int64(i), float64(i))
|
||||
require.NoError(t, err)
|
||||
|
||||
e := exemplar.Exemplar{
|
||||
Labels: lset,
|
||||
Ts: int64(i) * 2,
|
||||
Value: float64(i),
|
||||
HasTs: true,
|
||||
}
|
||||
_, err = app.AppendExemplar(ref, lset, e)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_histogram", numSeries*2)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
histograms := tsdbutil.GenerateTestHistograms(numHistograms)
|
||||
|
||||
for i := 0; i < numDatapoints; i++ {
|
||||
_, err := app.AppendHistogram(0, lset, int64(i), histograms[i], nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries*2)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms)
|
||||
|
||||
for i := 0; i < numDatapoints; i++ {
|
||||
_, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
require.NoError(t, app.Commit())
|
||||
m = gatherFamily(t, reg2, "prometheus_agent_samples_appended_total")
|
||||
require.Equal(t, float64(40), m.Metric[0].Counter.GetValue(), "agent wal mismatch of total appended samples")
|
||||
require.Equal(t, float64(80), m.Metric[1].Counter.GetValue(), "agent wal mismatch of total appended histograms")
|
||||
require.NoError(t, db.Close())
|
||||
}
|
||||
|
|
|
@ -201,7 +201,14 @@ func (c *LeveledCompactor) Plan(dir string) ([]string, error) {
|
|||
|
||||
func (c *LeveledCompactor) plan(dms []dirMeta) ([]string, error) {
|
||||
slices.SortFunc(dms, func(a, b dirMeta) int {
|
||||
return int(a.meta.MinTime - b.meta.MinTime)
|
||||
switch {
|
||||
case a.meta.MinTime < b.meta.MinTime:
|
||||
return -1
|
||||
case a.meta.MinTime > b.meta.MinTime:
|
||||
return 1
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
})
|
||||
|
||||
res := c.selectOverlappingDirs(dms)
|
||||
|
|
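This and the analogous SortFunc rewrites later in the commit replace `int(a - b)` comparators with explicit three-way comparisons. The subtraction form can overflow int64 (and truncates to int on 32-bit platforms), flipping the sign and corrupting the sort order; the switch form cannot. A small demonstration, assuming nothing beyond the standard library:

package main

import (
	"fmt"
	"math"
)

func main() {
	a, b := int64(math.MaxInt64), int64(-1)

	// a > b, so a comparator should return a positive value,
	// but the subtraction wraps around and comes out negative.
	fmt.Println(int(a - b)) // negative on every platform

	// The explicit comparison cannot overflow.
	cmp := 0
	switch {
	case a < b:
		cmp = -1
	case a > b:
		cmp = 1
	}
	fmt.Println(cmp) // 1
}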
tsdb/db.go
|
@ -248,6 +248,7 @@ type dbMetrics struct {
|
|||
tombCleanTimer prometheus.Histogram
|
||||
blocksBytes prometheus.Gauge
|
||||
maxBytes prometheus.Gauge
|
||||
retentionDuration prometheus.Gauge
|
||||
}
|
||||
|
||||
func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
|
||||
|
@ -321,6 +322,10 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
|
|||
Name: "prometheus_tsdb_retention_limit_bytes",
|
||||
Help: "Max number of bytes to be retained in the tsdb blocks, configured 0 means disabled",
|
||||
})
|
||||
m.retentionDuration = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "prometheus_tsdb_retention_limit_seconds",
|
||||
Help: "How long to retain samples in storage.",
|
||||
})
|
||||
m.sizeRetentionCount = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "prometheus_tsdb_size_retentions_total",
|
||||
Help: "The number of times that blocks were deleted because the maximum number of bytes was exceeded.",
|
||||
|
@ -341,6 +346,7 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
|
|||
m.tombCleanTimer,
|
||||
m.blocksBytes,
|
||||
m.maxBytes,
|
||||
m.retentionDuration,
|
||||
)
|
||||
}
|
||||
return m
|
||||
|
@ -580,7 +586,14 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) {
|
|||
}
|
||||
|
||||
slices.SortFunc(loadable, func(a, b *Block) int {
|
||||
return int(a.Meta().MinTime - b.Meta().MinTime)
|
||||
switch {
|
||||
case a.Meta().MinTime < b.Meta().MinTime:
|
||||
return -1
|
||||
case a.Meta().MinTime > b.Meta().MinTime:
|
||||
return 1
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
})
|
||||
|
||||
blockMetas := make([]BlockMeta, 0, len(loadable))
|
||||
|
@ -870,6 +883,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
|
|||
maxBytes = 0
|
||||
}
|
||||
db.metrics.maxBytes.Set(float64(maxBytes))
|
||||
db.metrics.retentionDuration.Set((time.Duration(opts.RetentionDuration) * time.Millisecond).Seconds())
|
||||
|
||||
if err := db.reload(); err != nil {
|
||||
return nil, err
|
||||
|
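The new gauge is set in seconds while opts.RetentionDuration is configured in milliseconds, which is why the test further down expects 1.0 for a configured value of 1000. The conversion in isolation:

package main

import (
	"fmt"
	"time"
)

func main() {
	retentionMs := int64(1000) // opts.RetentionDuration is in milliseconds
	fmt.Println((time.Duration(retentionMs) * time.Millisecond).Seconds()) // 1
}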
@ -887,13 +901,13 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
|
|||
|
||||
if initErr := db.head.Init(minValidTime); initErr != nil {
|
||||
db.head.metrics.walCorruptionsTotal.Inc()
|
||||
isOOOErr := isErrLoadOOOWal(initErr)
|
||||
if isOOOErr {
|
||||
level.Warn(db.logger).Log("msg", "Encountered OOO WAL read error, attempting repair", "err", initErr)
|
||||
if err := wbl.Repair(initErr); err != nil {
|
||||
return nil, errors.Wrap(err, "repair corrupted OOO WAL")
|
||||
e, ok := initErr.(*errLoadWbl)
|
||||
if ok {
|
||||
level.Warn(db.logger).Log("msg", "Encountered WBL read error, attempting repair", "err", initErr)
|
||||
if err := wbl.Repair(e.err); err != nil {
|
||||
return nil, errors.Wrap(err, "repair corrupted WBL")
|
||||
}
|
||||
level.Info(db.logger).Log("msg", "Successfully repaired OOO WAL")
|
||||
level.Info(db.logger).Log("msg", "Successfully repaired WBL")
|
||||
} else {
|
||||
level.Warn(db.logger).Log("msg", "Encountered WAL read error, attempting repair", "err", initErr)
|
||||
if err := wal.Repair(initErr); err != nil {
|
||||
|
@ -1448,7 +1462,14 @@ func (db *DB) reloadBlocks() (err error) {
|
|||
db.metrics.blocksBytes.Set(float64(blocksSize))
|
||||
|
||||
slices.SortFunc(toLoad, func(a, b *Block) int {
|
||||
return int(a.Meta().MinTime - b.Meta().MinTime)
|
||||
switch {
|
||||
case a.Meta().MinTime < b.Meta().MinTime:
|
||||
return -1
|
||||
case a.Meta().MinTime > b.Meta().MinTime:
|
||||
return 1
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
})
|
||||
|
||||
// Swap new blocks first for subsequently created readers to be seen.
|
||||
|
@ -1518,7 +1539,14 @@ func deletableBlocks(db *DB, blocks []*Block) map[ulid.ULID]struct{} {
|
|||
// Sort the blocks by time - newest to oldest (largest to smallest timestamp).
|
||||
// This ensures that the retentions will remove the oldest blocks.
|
||||
slices.SortFunc(blocks, func(a, b *Block) int {
|
||||
return int(b.Meta().MaxTime - a.Meta().MaxTime)
|
||||
switch {
|
||||
case b.Meta().MaxTime < a.Meta().MaxTime:
|
||||
return -1
|
||||
case b.Meta().MaxTime > a.Meta().MaxTime:
|
||||
return 1
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
})
|
||||
|
||||
for _, block := range blocks {
|
||||
|
|
|
@ -1494,6 +1494,19 @@ func TestTimeRetention(t *testing.T) {
|
|||
require.Equal(t, expBlocks[len(expBlocks)-1].MaxTime, actBlocks[len(actBlocks)-1].meta.MaxTime)
|
||||
}
|
||||
|
||||
func TestRetentionDurationMetric(t *testing.T) {
|
||||
db := openTestDB(t, &Options{
|
||||
RetentionDuration: 1000,
|
||||
}, []int64{100})
|
||||
defer func() {
|
||||
require.NoError(t, db.Close())
|
||||
}()
|
||||
|
||||
expRetentionDuration := 1.0
|
||||
actRetentionDuration := prom_testutil.ToFloat64(db.metrics.retentionDuration)
|
||||
require.Equal(t, expRetentionDuration, actRetentionDuration, "metric retention duration mismatch")
|
||||
}
|
||||
|
||||
func TestSizeRetention(t *testing.T) {
|
||||
opts := DefaultOptions()
|
||||
opts.OutOfOrderTimeWindow = 100
|
||||
|
@ -3793,7 +3806,7 @@ func TestOOOWALWrite(t *testing.T) {
|
|||
actRecs := getRecords(path.Join(dir, "wal"))
|
||||
require.Equal(t, inOrderRecords, actRecs)
|
||||
|
||||
// The OOO WAL.
|
||||
// The WBL.
|
||||
actRecs = getRecords(path.Join(dir, wlog.WblDirName))
|
||||
require.Equal(t, oooRecords, actRecs)
|
||||
}
|
||||
|
|
|
@ -765,17 +765,17 @@ func (h *Head) Init(minValidTime int64) error {
|
|||
|
||||
wblReplayStart := time.Now()
|
||||
if h.wbl != nil {
|
||||
// Replay OOO WAL.
|
||||
// Replay WBL.
|
||||
startFrom, endAt, e = wlog.Segments(h.wbl.Dir())
|
||||
if e != nil {
|
||||
return errors.Wrap(e, "finding OOO WAL segments")
|
||||
return &errLoadWbl{errors.Wrap(e, "finding WBL segments")}
|
||||
}
|
||||
h.startWALReplayStatus(startFrom, endAt)
|
||||
|
||||
for i := startFrom; i <= endAt; i++ {
|
||||
s, err := wlog.OpenReadSegment(wlog.SegmentName(h.wbl.Dir(), i))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, fmt.Sprintf("open WBL segment: %d", i))
|
||||
return &errLoadWbl{errors.Wrap(err, fmt.Sprintf("open WBL segment: %d", i))}
|
||||
}
|
||||
|
||||
sr := wlog.NewSegmentBufReader(s)
|
||||
|
@ -784,7 +784,7 @@ func (h *Head) Init(minValidTime int64) error {
|
|||
level.Warn(h.logger).Log("msg", "Error while closing the wbl segments reader", "err", err)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
return &errLoadWbl{err}
|
||||
}
|
||||
level.Info(h.logger).Log("msg", "WBL segment loaded", "segment", i, "maxSegment", endAt)
|
||||
h.updateWALReplayStatusRead(i)
|
||||
|
|
|
@ -2086,6 +2086,79 @@ func TestWalRepair_DecodingError(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// TestWblRepair_DecodingError ensures that a repair is run for an error
|
||||
// when decoding a record.
|
||||
func TestWblRepair_DecodingError(t *testing.T) {
|
||||
var enc record.Encoder
|
||||
corrFunc := func(rec []byte) []byte {
|
||||
return rec[:3]
|
||||
}
|
||||
rec := enc.Samples([]record.RefSample{{Ref: 0, T: 99, V: 1}}, []byte{})
|
||||
totalRecs := 9
|
||||
expRecs := 5
|
||||
dir := t.TempDir()
|
||||
|
||||
// Fill the wbl and corrupt it.
|
||||
{
|
||||
wal, err := wlog.New(nil, nil, filepath.Join(dir, "wal"), wlog.CompressionNone)
|
||||
require.NoError(t, err)
|
||||
wbl, err := wlog.New(nil, nil, filepath.Join(dir, "wbl"), wlog.CompressionNone)
|
||||
require.NoError(t, err)
|
||||
|
||||
for i := 1; i <= totalRecs; i++ {
|
||||
// At this point insert a corrupted record.
|
||||
if i-1 == expRecs {
|
||||
require.NoError(t, wbl.Log(corrFunc(rec)))
|
||||
continue
|
||||
}
|
||||
require.NoError(t, wbl.Log(rec))
|
||||
}
|
||||
|
||||
opts := DefaultHeadOptions()
|
||||
opts.ChunkRange = 1
|
||||
opts.ChunkDirRoot = wal.Dir()
|
||||
opts.OutOfOrderCapMax.Store(30)
|
||||
opts.OutOfOrderTimeWindow.Store(1000 * time.Minute.Milliseconds())
|
||||
h, err := NewHead(nil, nil, wal, wbl, opts, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0.0, prom_testutil.ToFloat64(h.metrics.walCorruptionsTotal))
|
||||
initErr := h.Init(math.MinInt64)
|
||||
|
||||
_, ok := initErr.(*errLoadWbl)
|
||||
require.True(t, ok) // Wbl errors are wrapped into errLoadWbl, make sure we can unwrap it.
|
||||
|
||||
err = errors.Cause(initErr) // So that we can pick up errors even if wrapped.
|
||||
_, corrErr := err.(*wlog.CorruptionErr)
|
||||
require.True(t, corrErr, "reading the wal didn't return corruption error")
|
||||
require.NoError(t, h.Close()) // Head will close the wal as well.
|
||||
}
|
||||
|
||||
// Open the db to trigger a repair.
|
||||
{
|
||||
db, err := Open(dir, nil, nil, DefaultOptions(), nil)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
require.NoError(t, db.Close())
|
||||
}()
|
||||
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.walCorruptionsTotal))
|
||||
}
|
||||
|
||||
// Read the wbl content after the repair.
|
||||
{
|
||||
sr, err := wlog.NewSegmentsReader(filepath.Join(dir, "wbl"))
|
||||
require.NoError(t, err)
|
||||
defer sr.Close()
|
||||
r := wlog.NewReader(sr)
|
||||
|
||||
var actRec int
|
||||
for r.Next() {
|
||||
actRec++
|
||||
}
|
||||
require.NoError(t, r.Err())
|
||||
require.Equal(t, expRecs, actRec, "Wrong number of intact records")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeadReadWriterRepair(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
|
@ -4446,9 +4519,8 @@ func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) {
|
|||
require.Greater(t, offset, 0)
|
||||
}
|
||||
|
||||
// TestOOOWalReplay checks the replay at a low level.
|
||||
// TODO(codesome): Needs test for ooo WAL repair.
|
||||
func TestOOOWalReplay(t *testing.T) {
|
||||
// TestWBLReplay checks the replay at a low level.
|
||||
func TestWBLReplay(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy)
|
||||
require.NoError(t, err)
|
||||
|
|
|
@ -825,22 +825,14 @@ func (e errLoadWbl) Error() string {
|
|||
return e.err.Error()
|
||||
}
|
||||
|
||||
// To support errors.Cause().
|
||||
func (e errLoadWbl) Cause() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
// To support errors.Unwrap().
|
||||
func (e errLoadWbl) Unwrap() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
// isErrLoadOOOWal returns a boolean if the error is errLoadWbl.
|
||||
func isErrLoadOOOWal(err error) bool {
|
||||
_, ok := err.(*errLoadWbl)
|
||||
return ok
|
||||
}
|
||||
|
||||
type wblSubsetProcessor struct {
|
||||
input chan wblSubsetProcessorInputItem
|
||||
output chan []record.RefSample
|
||||
|
|
|
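With Unwrap in place, the dedicated isErrLoadOOOWal helper removed above becomes unnecessary: callers can type-assert directly (as the db.go hunk earlier now does) or lean on the standard errors package. A minimal sketch with a local copy of the wrapper type:

package main

import (
	"errors"
	"fmt"
)

// Local, simplified copy of the wrapper for illustration.
type errLoadWbl struct{ err error }

func (e *errLoadWbl) Error() string { return e.err.Error() }
func (e *errLoadWbl) Unwrap() error { return e.err }

func main() {
	base := errors.New("corrupt WBL segment")
	var initErr error = &errLoadWbl{err: base}

	var target *errLoadWbl
	fmt.Println(errors.As(initErr, &target)) // true: replaces isErrLoadOOOWal
	fmt.Println(errors.Is(initErr, base))    // true: Unwrap exposes the cause
}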
@ -413,6 +413,7 @@ type Postings interface {
|
|||
Seek(v storage.SeriesRef) bool
|
||||
|
||||
// At returns the value at the current iterator position.
|
||||
// At should only be called after a successful call to Next or Seek.
|
||||
At() storage.SeriesRef
|
||||
|
||||
// Err returns the last error of the iterator.
|
||||
|
@ -747,7 +748,7 @@ func (rp *removedPostings) Err() error {
|
|||
// ListPostings implements the Postings interface over a plain list.
|
||||
type ListPostings struct {
|
||||
list []storage.SeriesRef
|
||||
pos int
|
||||
cur storage.SeriesRef
|
||||
}
|
||||
|
||||
func NewListPostings(list []storage.SeriesRef) Postings {
|
||||
|
@ -759,34 +760,39 @@ func newListPostings(list ...storage.SeriesRef) *ListPostings {
|
|||
}
|
||||
|
||||
func (it *ListPostings) At() storage.SeriesRef {
|
||||
return it.list[it.pos-1]
|
||||
return it.cur
|
||||
}
|
||||
|
||||
func (it *ListPostings) Next() bool {
|
||||
if it.pos < len(it.list) {
|
||||
it.pos++
|
||||
if len(it.list) > 0 {
|
||||
it.cur = it.list[0]
|
||||
it.list = it.list[1:]
|
||||
return true
|
||||
}
|
||||
it.cur = 0
|
||||
return false
|
||||
}
|
||||
|
||||
func (it *ListPostings) Seek(x storage.SeriesRef) bool {
|
||||
if it.pos == 0 {
|
||||
it.pos++
|
||||
}
|
||||
if it.pos > len(it.list) {
|
||||
return false
|
||||
}
|
||||
// If the current value satisfies, then return.
|
||||
if it.list[it.pos-1] >= x {
|
||||
if it.cur >= x {
|
||||
return true
|
||||
}
|
||||
if len(it.list) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Do binary search between current position and end.
|
||||
it.pos = sort.Search(len(it.list[it.pos-1:]), func(i int) bool {
|
||||
return it.list[i+it.pos-1] >= x
|
||||
}) + it.pos
|
||||
return it.pos-1 < len(it.list)
|
||||
i := sort.Search(len(it.list), func(i int) bool {
|
||||
return it.list[i] >= x
|
||||
})
|
||||
if i < len(it.list) {
|
||||
it.cur = it.list[i]
|
||||
it.list = it.list[i+1:]
|
||||
return true
|
||||
}
|
||||
it.list = nil
|
||||
return false
|
||||
}
|
||||
|
||||
func (it *ListPostings) Err() error {
|
||||
|
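The rewrite replaces the position index with a shrinking sub-slice plus a cached cur value, which removes the bounds arithmetic from At. A condensed, runnable copy of the new iteration logic, with SeriesRef reduced to a plain uint64:

package main

import (
	"fmt"
	"sort"
)

type listPostings struct {
	list []uint64
	cur  uint64
}

func (it *listPostings) Next() bool {
	if len(it.list) > 0 {
		it.cur = it.list[0]
		it.list = it.list[1:]
		return true
	}
	it.cur = 0
	return false
}

func (it *listPostings) Seek(x uint64) bool {
	if it.cur >= x { // current value already satisfies the seek
		return true
	}
	// Binary search over the remaining (sorted) tail.
	i := sort.Search(len(it.list), func(i int) bool { return it.list[i] >= x })
	if i < len(it.list) {
		it.cur = it.list[i]
		it.list = it.list[i+1:]
		return true
	}
	it.list = nil
	return false
}

func main() {
	it := &listPostings{list: []uint64{2, 4, 8, 16}}
	fmt.Println(it.Next(), it.cur) // true 2
	fmt.Println(it.Seek(5), it.cur) // true 8
	fmt.Println(it.Next(), it.cur) // true 16
}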
|
|
@ -1242,6 +1242,8 @@ func TestListPostings(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
// BenchmarkListPostings benchmarks ListPostings by iterating Next/At sequentially.
|
||||
// See also BenchmarkIntersect as it performs more `At` calls than `Next` calls when intersecting.
|
||||
func BenchmarkListPostings(b *testing.B) {
|
||||
const maxCount = 1e6
|
||||
input := make([]storage.SeriesRef, maxCount)
|
||||
|
|
|
@ -63,6 +63,15 @@ func (m *maxHeap) push(item Stat) {
}

func (m *maxHeap) get() []Stat {
	slices.SortFunc(m.Items, func(a, b Stat) int { return int(b.Count - a.Count) })
	slices.SortFunc(m.Items, func(a, b Stat) int {
		switch {
		case b.Count < a.Count:
			return -1
		case b.Count > a.Count:
			return 1
		default:
			return 0
		}
	})
	return m.Items
}

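The replaced one-liner computed int(b.Count - a.Count); assuming Count is an unsigned 64-bit counter, the subtraction wraps modulo 2^64 and the int conversion can flip the sign, so the descending sort could misorder large counts. The explicit three-way switch avoids the arithmetic entirely. A small demonstration of the wrap, with hypothetical values on a 64-bit platform:

package main

import "fmt"

func main() {
	var a, b uint64 = 1, (1 << 63) + 1 // b is far larger than a
	// A descending comparator must return a positive value here so that
	// b sorts first, but the wrapped difference converts to a negative int.
	fmt.Println(int(b - a)) // -9223372036854775808: the wrong sign
}
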
@ -178,17 +178,39 @@ type chunkMetaAndChunkDiskMapperRef struct {
}

func refLessByMinTimeAndMinRef(a, b chunkMetaAndChunkDiskMapperRef) int {
	if a.meta.MinTime == b.meta.MinTime {
		return int(a.meta.Ref - b.meta.Ref)
	switch {
	case a.meta.MinTime < b.meta.MinTime:
		return -1
	case a.meta.MinTime > b.meta.MinTime:
		return 1
	}

	switch {
	case a.meta.Ref < b.meta.Ref:
		return -1
	case a.meta.Ref > b.meta.Ref:
		return 1
	default:
		return 0
	}
	return int(a.meta.MinTime - b.meta.MinTime)
}

func lessByMinTimeAndMinRef(a, b chunks.Meta) int {
	if a.MinTime == b.MinTime {
		return int(a.Ref - b.Ref)
	switch {
	case a.MinTime < b.MinTime:
		return -1
	case a.MinTime > b.MinTime:
		return 1
	}

	switch {
	case a.Ref < b.Ref:
		return -1
	case a.Ref > b.Ref:
		return 1
	default:
		return 0
	}
	return int(a.MinTime - b.MinTime)
}

func (oh *OOOHeadIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) {

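Both comparators now follow the three-way contract slices.SortFunc expects (negative, zero, positive) instead of returning int(a.MinTime - b.MinTime), which can wrap or truncate for distant int64 timestamps just as the counter subtraction above can. A sketch of the same ordering on an illustrative meta type:

package main

import (
	"fmt"
	"slices"
)

// meta is an illustrative stand-in for chunks.Meta.
type meta struct {
	MinTime int64
	Ref     uint64
}

// less orders by MinTime, breaking ties by Ref, without subtraction.
func less(a, b meta) int {
	switch {
	case a.MinTime < b.MinTime:
		return -1
	case a.MinTime > b.MinTime:
		return 1
	}
	switch {
	case a.Ref < b.Ref:
		return -1
	case a.Ref > b.Ref:
		return 1
	default:
		return 0
	}
}

func main() {
	ms := []meta{{MinTime: 5, Ref: 2}, {MinTime: 5, Ref: 1}, {MinTime: 1, Ref: 9}}
	slices.SortFunc(ms, less)
	fmt.Println(ms) // [{1 9} {5 1} {5 2}]
}
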
@ -304,6 +304,10 @@ func (d *Decoder) Samples(rec []byte, samples []RefSample) ([]RefSample, error)
		baseRef  = dec.Be64()
		baseTime = dec.Be64int64()
	)
	// Allow 1 byte for each varint and 8 for the value; the output slice must be at least that big.
	if minSize := dec.Len() / (1 + 1 + 8); cap(samples) < minSize {
		samples = make([]RefSample, 0, minSize)
	}
	for len(dec.B) > 0 && dec.Err() == nil {
		dref := dec.Varint64()
		dtime := dec.Varint64()

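The capacity hint is a safe bound on the sample count: each encoded sample costs at least one byte for the ref-delta varint, one byte for the timestamp-delta varint, and exactly eight bytes for the value, so a remaining payload of n bytes holds at most n/10 samples, and pre-allocating that many entries means the append loop never regrows the slice. The arithmetic, as a sketch:

package main

import "fmt"

func main() {
	remaining := 1000 // hypothetical bytes left after baseRef/baseTime
	// 1 byte minimum per varint (ref delta, time delta) + 8 bytes per value.
	minSize := remaining / (1 + 1 + 8)
	fmt.Println(minSize) // 100: enough capacity for the worst case
}
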
@ -105,7 +105,8 @@ var (
	MixedFloatsHistogramsWarning        = fmt.Errorf("%w: encountered a mix of histograms and floats for metric name", PromQLWarning)
	MixedClassicNativeHistogramsWarning = fmt.Errorf("%w: vector contains a mix of classic and native histograms for metric name", PromQLWarning)

	PossibleNonCounterInfo = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count:", PromQLInfo)
	PossibleNonCounterInfo                  = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count/_bucket:", PromQLInfo)
	HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (and may give inaccurate results) for metric name", PromQLInfo)
)

type annoErr struct {

@ -155,11 +156,20 @@ func NewMixedClassicNativeHistogramsWarning(metricName string, pos posrange.Posi
	}
}

// NewPossibleNonCounterInfo is used when a counter metric does not have the suffixes
// _total, _sum or _count.
// NewPossibleNonCounterInfo is used when a named counter metric with only float samples does not
// have the suffixes _total, _sum, _count, or _bucket.
func NewPossibleNonCounterInfo(metricName string, pos posrange.PositionRange) annoErr {
	return annoErr{
		PositionRange: pos,
		Err:           fmt.Errorf("%w %q", PossibleNonCounterInfo, metricName),
	}
}

// NewHistogramQuantileForcedMonotonicityInfo is used when the input (classic histograms) to
// histogram_quantile needs to be forced to be monotonic.
func NewHistogramQuantileForcedMonotonicityInfo(metricName string, pos posrange.PositionRange) annoErr {
	return annoErr{
		PositionRange: pos,
		Err:           fmt.Errorf("%w %q", HistogramQuantileForcedMonotonicityInfo, metricName),
	}
}

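Since every annotation wraps PromQLWarning or PromQLInfo via %w, callers can classify annotations with errors.Is instead of matching message strings. A sketch of that wrapping chain, with illustrative sentinels standing in for the real variables:

package main

import (
	"errors"
	"fmt"
)

// Illustrative stand-ins for PromQLInfo and PossibleNonCounterInfo.
var (
	infoClass  = errors.New("PromQL info")
	nonCounter = fmt.Errorf("%w: metric might not be a counter", infoClass)
)

func main() {
	anno := fmt.Errorf("%w %q", nonCounter, "http_requests")
	fmt.Println(errors.Is(anno, infoClass))  // true: an info-level annotation
	fmt.Println(errors.Is(anno, nonCounter)) // true: and specifically this one
}
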
@ -2,7 +2,7 @@ import * as React from 'react';
import { shallow } from 'enzyme';
import { WALReplayData } from '../types/types';
import { StartingContent } from './withStartingIndicator';
import { Progress } from 'reactstrap';
import { Alert, Progress } from 'reactstrap';

describe('Starting', () => {
  describe('progress bar', () => {

@ -52,5 +52,17 @@ describe('Starting', () => {
      expect(progress.prop('value')).toBe(21);
      expect(progress.prop('color')).toBe('success');
    });

    it('shows unexpected error', () => {
      const status: WALReplayData = {
        min: 0,
        max: 20,
        current: 0,
      };

      const starting = shallow(<StartingContent status={status} isUnexpected={true} />);
      const alert = starting.find(Alert);
      expect(alert.prop('color')).toBe('danger');
    });
  });
});

@ -51,7 +51,7 @@ export const withStartingIndicator =
    const { ready, walReplayStatus, isUnexpected } = useFetchReadyInterval(pathPrefix);
    const staticReady = useReady();

    if (staticReady || ready || isUnexpected) {
    if (staticReady || ready) {
      return <Page {...(rest as T)} />;
    }

@ -13,6 +13,7 @@ interface RulesContentProps {
interface RuleGroup {
  name: string;
  file: string;
  interval: string;
  rules: Rule[];
  evaluationTime: string;
  lastEvaluation: string;

@ -58,13 +59,16 @@ export const RulesContent: FC<RulesContentProps> = ({ response }) => {
          <Table bordered key={i}>
            <thead>
              <tr>
                <td colSpan={3}>
                <td>
                  <a href={'#' + g.name}>
                    <h4 id={g.name} className="text-break">
                      {g.name}
                    </h4>
                  </a>
                </td>
                <td colSpan={2}>
                  <h4>Interval: {humanizeDuration(parseFloat(g.interval) * 1000)}</h4>
                </td>
                <td>
                  <h4>{formatRelative(g.lastEvaluation, now())}</h4>
                </td>