commit 7c873004c7

Merge remote-tracking branch 'prometheus/main' into arve/close-engine
--- a/.github/workflows/buf-lint.yml
+++ b/.github/workflows/buf-lint.yml
@@ -13,7 +13,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2
+      - uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
       - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
--- a/.github/workflows/buf.yml
+++ b/.github/workflows/buf.yml
@@ -13,7 +13,7 @@ jobs:
     if: github.repository_owner == 'prometheus'
     steps:
      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-     - uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2
+     - uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
      - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -143,6 +143,18 @@ jobs:
     with:
       parallelism: 12
       thread: ${{ matrix.thread }}
+  build_all_status:
+    name: Report status of build Prometheus for all architectures
+    runs-on: ubuntu-latest
+    needs: [build_all]
+    if: github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-')
+    steps:
+      - name: Successful build
+        if: ${{ !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled')) }}
+        run: exit 0
+      - name: Failing or cancelled build
+        if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }}
+        run: exit 1
   check_generated_parser:
     name: Check generated parser
     runs-on: ubuntu-latest
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -27,12 +27,12 @@ jobs:
         uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6

       - name: Initialize CodeQL
-        uses: github/codeql-action/init@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
+        uses: github/codeql-action/init@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
         with:
           languages: ${{ matrix.language }}

       - name: Autobuild
-        uses: github/codeql-action/autobuild@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
+        uses: github/codeql-action/autobuild@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11

       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
+        uses: github/codeql-action/analyze@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -45,6 +45,6 @@ jobs:

       # Upload the results to GitHub's code scanning dashboard.
       - name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # tag=v3.25.8
+        uses: github/codeql-action/upload-sarif@b611370bb5703a7efb587f9d136a52ea24c5c38c # tag=v3.25.11
         with:
           sarif_file: results.sarif
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,12 +1,5 @@
 run:
   timeout: 15m
-  skip-files:
-    # Skip autogenerated files.
-    - ^.*\.(pb|y)\.go$
-  skip-dirs:
-    # Copied it from a different source
-    - storage/remote/otlptranslator/prometheusremotewrite
-    - storage/remote/otlptranslator/prometheus

 output:
   sort-results: true
@@ -33,6 +26,13 @@ linters:

 issues:
   max-same-issues: 0
+  exclude-files:
+    # Skip autogenerated files.
+    - ^.*\.(pb|y)\.go$
+  exclude-dirs:
+    # Copied it from a different source
+    - storage/remote/otlptranslator/prometheusremotewrite
+    - storage/remote/otlptranslator/prometheus
   exclude-rules:
     - linters:
         - gocritic
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,10 @@

 ## unreleased

+* [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. #14200
+* [FEATURE] Remote-Write: Add sender and receiver support for the [Remote Write 2.0-rc.2](https://prometheus.io/docs/specs/remote_write_spec_2_0/) specification. #14395 #14427 #14444
+* [ENHANCEMENT] Remote-Write: 1.x messages against Remote Write 2.x receivers will now have correct values for `prometheus_storage_<samples|histograms|exemplar>_failed_total` in case of partial errors. #14444
+
 ## 2.53.1 / 2024-07-10

 Fix a bug which would drop samples in remote-write if the sending flow stalled
--- a/cmd/promtool/metrics.go
+++ b/cmd/promtool/metrics.go
@@ -101,6 +101,7 @@ func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[strin
 	return successExitCode
 }

+// TODO(bwplotka): Add PRW 2.0 support.
 func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]string) bool {
 	metricsData, err := fmtutil.MetricTextToWriteRequest(bytes.NewReader(data), labels)
 	if err != nil {
@@ -116,7 +117,7 @@ func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]s

 	// Encode the request body into snappy encoding.
 	compressed := snappy.Encode(nil, raw)
-	err = client.Store(context.Background(), compressed, 0)
+	_, err = client.Store(context.Background(), compressed, 0)
 	if err != nil {
 		fmt.Fprintln(os.Stderr, " FAILED:", err)
 		return false
--- a/config/config.go
+++ b/config/config.go
@@ -227,6 +227,9 @@ var (
 	DefaultExemplarsConfig = ExemplarsConfig{
 		MaxExemplars: 100000,
 	}
+
+	// DefaultOTLPConfig is the default OTLP configuration.
+	DefaultOTLPConfig = OTLPConfig{}
 )

 // Config is the top-level configuration for Prometheus's config files.
@@ -242,6 +245,7 @@ type Config struct {

 	RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"`
 	RemoteReadConfigs  []*RemoteReadConfig  `yaml:"remote_read,omitempty"`
+	OTLPConfig         OTLPConfig           `yaml:"otlp,omitempty"`
 }

 // SetDirectory joins any relative file paths with dir.
@@ -1304,3 +1308,35 @@ func getGoGCEnv() int {
 	}
 	return DefaultRuntimeConfig.GoGC
 }
+
+// OTLPConfig is the configuration for writing to the OTLP endpoint.
+type OTLPConfig struct {
+	PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"`
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	*c = DefaultOTLPConfig
+	type plain OTLPConfig
+	if err := unmarshal((*plain)(c)); err != nil {
+		return err
+	}
+
+	seen := map[string]struct{}{}
+	var err error
+	for i, attr := range c.PromoteResourceAttributes {
+		attr = strings.TrimSpace(attr)
+		if attr == "" {
+			err = errors.Join(err, fmt.Errorf("empty promoted OTel resource attribute"))
+			continue
+		}
+		if _, exists := seen[attr]; exists {
+			err = errors.Join(err, fmt.Errorf("duplicated promoted OTel resource attribute %q", attr))
+			continue
+		}
+
+		seen[attr] = struct{}{}
+		c.PromoteResourceAttributes[i] = attr
+	}
+	return err
+}
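As an illustration of the new sanitization, here is a minimal sketch. It assumes the exported `config.OTLPConfig` type from the hunk above and uses gopkg.in/yaml.v2, the interface that the `UnmarshalYAML` signature targets; the attribute names are just examples.

	package main

	import (
		"fmt"

		"gopkg.in/yaml.v2"

		"github.com/prometheus/prometheus/config"
	)

	func main() {
		var c config.OTLPConfig
		// " k8s.job.name " is trimmed to "k8s.job.name", which then collides
		// with the later literal "k8s.job.name"; the empty string is rejected.
		err := yaml.Unmarshal([]byte(
			`promote_resource_attributes: ["k8s.cluster.name", " k8s.job.name ", "k8s.job.name", ""]`,
		), &c)
		fmt.Println(err)
		// Expected, per the UnmarshalYAML logic above:
		//   duplicated promoted OTel resource attribute "k8s.job.name"
		//   empty promoted OTel resource attribute
	}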
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -156,6 +156,12 @@ var expectedConf = &Config{
 		},
 	},

+	OTLPConfig: OTLPConfig{
+		PromoteResourceAttributes: []string{
+			"k8s.cluster.name", "k8s.job.name", "k8s.namespace.name",
+		},
+	},
+
 	RemoteReadConfigs: []*RemoteReadConfig{
 		{
 			URL: mustParseURL("http://remote1/read"),
@@ -1471,6 +1477,26 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
 	require.False(t, got.RemoteWriteConfigs[1].QueueConfig.RetryOnRateLimit)
 }

+func TestOTLPSanitizeResourceAttributes(t *testing.T) {
+	t.Run("good config", func(t *testing.T) {
+		want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, false, log.NewNopLogger())
+		require.NoError(t, err)
+
+		out, err := yaml.Marshal(want)
+		require.NoError(t, err)
+		var got Config
+		require.NoError(t, yaml.UnmarshalStrict(out, &got))
+
+		require.Equal(t, []string{"k8s.cluster.name", "k8s.job.name", "k8s.namespace.name"}, got.OTLPConfig.PromoteResourceAttributes)
+	})
+
+	t.Run("bad config", func(t *testing.T) {
+		_, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, false, log.NewNopLogger())
+		require.ErrorContains(t, err, `duplicated promoted OTel resource attribute "k8s.job.name"`)
+		require.ErrorContains(t, err, `empty promoted OTel resource attribute`)
+	})
+}
+
 func TestLoadConfig(t *testing.T) {
 	// Parse a valid file that sets a global scrape timeout. This tests whether parsing
 	// an overwritten default field in the global config permanently changes the default.
--- a/config/testdata/conf.good.yml
+++ b/config/testdata/conf.good.yml
@@ -45,6 +45,9 @@ remote_write:
     headers:
       name: value

+otlp:
+  promote_resource_attributes: ["k8s.cluster.name", "k8s.job.name", "k8s.namespace.name"]
+
 remote_read:
   - url: http://remote1/read
     read_recent: true
--- /dev/null
+++ b/config/testdata/otlp_sanitize_resource_attributes.bad.yml
@@ -0,0 +1,2 @@
+otlp:
+  promote_resource_attributes: ["k8s.cluster.name", " k8s.job.name ", "k8s.namespace.name", "k8s.job.name", ""]
--- /dev/null
+++ b/config/testdata/otlp_sanitize_resource_attributes.good.yml
@@ -0,0 +1,2 @@
+otlp:
+  promote_resource_attributes: ["k8s.cluster.name", " k8s.job.name ", "k8s.namespace.name"]
--- a/discovery/moby/network.go
+++ b/discovery/moby/network.go
@@ -17,7 +17,7 @@ import (
 	"context"
 	"strconv"

-	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/client"

 	"github.com/prometheus/prometheus/util/strutil"
@@ -34,7 +34,7 @@ const (
 )

 func getNetworksLabels(ctx context.Context, client *client.Client, labelPrefix string) (map[string]map[string]string, error) {
-	networks, err := client.NetworkList(ctx, types.NetworkListOptions{})
+	networks, err := client.NetworkList(ctx, network.ListOptions{})
 	if err != nil {
 		return nil, err
 	}
--- a/docs/command-line/prometheus.md
+++ b/docs/command-line/prometheus.md
@@ -30,7 +30,7 @@ The Prometheus monitoring server
 | <code class="text-nowrap">--web.console.templates</code> | Path to the console template directory, available at /consoles. | `consoles` |
 | <code class="text-nowrap">--web.console.libraries</code> | Path to the console library directory. | `console_libraries` |
 | <code class="text-nowrap">--web.page-title</code> | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` |
-| <code class="text-nowrap">--web.cors.origin</code> | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1|domain2)\.com' | `.*` |
+| <code class="text-nowrap">--web.cors.origin</code> | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1\|domain2)\.com' | `.*` |
 | <code class="text-nowrap">--storage.tsdb.path</code> | Base path for metrics storage. Use with server mode only. | `data/` |
 | <code class="text-nowrap">--storage.tsdb.retention</code> | [DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use "storage.tsdb.retention.time" instead. Use with server mode only. | |
 | <code class="text-nowrap">--storage.tsdb.retention.time</code> | How long to retain samples in storage. When this flag is set it overrides "storage.tsdb.retention". If neither this flag nor "storage.tsdb.retention" nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. | |
--- a/docs/configuration/configuration.md
+++ b/docs/configuration/configuration.md
@@ -152,6 +152,10 @@ alerting:
 remote_write:
   [ - <remote_write> ... ]

+# Settings related to the OTLP receiver feature.
+otlp:
+  [ promote_resource_attributes: [<string>, ...] | default = [ ] ]
+
 # Settings related to the remote read feature.
 remote_read:
   [ - <remote_read> ... ]
@@ -458,13 +462,15 @@ metric_relabel_configs:
 [ keep_dropped_targets: <int> | default = 0 ]

 # Limit on total number of positive and negative buckets allowed in a single
-# native histogram. If this is exceeded, the entire scrape will be treated as
-# failed. 0 means no limit.
+# native histogram. The resolution of a histogram with more buckets will be
+# reduced until the number of buckets is within the limit. If the limit cannot
+# be reached, the scrape will fail.
+# 0 means no limit.
 [ native_histogram_bucket_limit: <int> | default = 0 ]

 # Lower limit for the growth factor of one bucket to the next in each native
 # histogram. The resolution of a histogram with a lower growth factor will be
-# reduced until it is within the limit.
+# reduced as much as possible until it is within the limit.
 # To set an upper limit for the schema (equivalent to "scale" in OTel's
 # exponential histograms), use the following factor limits:
 #
--- a/docs/querying/basics.md
+++ b/docs/querying/basics.md
@@ -8,9 +8,15 @@ sort_rank: 1

 Prometheus provides a functional query language called PromQL (Prometheus Query
 Language) that lets the user select and aggregate time series data in real
-time. The result of an expression can either be shown as a graph, viewed as
-tabular data in Prometheus's expression browser, or consumed by external
-systems via the [HTTP API](api.md).
+time.
+
+When you send a query request to Prometheus, it can be an _instant query_, evaluated at one point in time,
+or a _range query_ at equally-spaced steps between a start and an end time. PromQL works exactly the same
+in each case; the range query is just like an instant query run multiple times at different timestamps.
+
+In the Prometheus UI, the "Table" tab is for instant queries and the "Graph" tab is for range queries.
+
+Other programs can fetch the result of a PromQL expression via the [HTTP API](api.md).

 ## Examples

@@ -94,9 +100,7 @@ Examples:

 ## Time series selectors

-Time series selectors are responsible for selecting the time series and raw or inferred sample timestamps and values.
-
-Time series *selectors* are not to be confused with the higher-level concept of instant and range *queries* that can execute the time series *selectors*. A higher-level instant query would evaluate the given selector at one point in time, whereas a range query would evaluate the selector at multiple different times between a minimum and maximum timestamp at regular steps.
+These are the basic building-blocks that instruct PromQL what data to fetch.

 ### Instant vector selectors
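To make the instant/range distinction concrete, here is a hedged Go sketch that runs the same expression both ways against the HTTP API's `/api/v1/query` and `/api/v1/query_range` endpoints; the localhost:9090 address, metric name, and time window are assumptions for illustration.

	package main

	import (
		"fmt"
		"io"
		"net/http"
		"net/url"
	)

	func fetch(u string) {
		resp, err := http.Get(u)
		if err != nil {
			fmt.Println(err)
			return
		}
		defer resp.Body.Close()
		body, _ := io.ReadAll(resp.Body)
		fmt.Println(resp.Status, len(body), "bytes")
	}

	func main() {
		base := "http://localhost:9090" // assumed local Prometheus
		expr := url.QueryEscape(`rate(http_requests_total[5m])`)

		// Instant query: one evaluation at a single timestamp (defaults to now).
		fetch(base + "/api/v1/query?query=" + expr)

		// Range query: the same expression evaluated every 15s over a window;
		// conceptually an instant query repeated at each step.
		fetch(base + "/api/v1/query_range?query=" + expr +
			"&start=2024-07-01T00:00:00Z&end=2024-07-01T00:05:00Z&step=15s")
	}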
--- a/docs/storage.md
+++ b/docs/storage.md
@@ -137,6 +137,18 @@ will be used.
 Expired block cleanup happens in the background. It may take up to two hours
 to remove expired blocks. Blocks must be fully expired before they are removed.

+## Right-Sizing Retention Size
+
+If you are utilizing `storage.tsdb.retention.size` to set a size limit, you
+will want to consider the right size for this value relative to the storage you
+have allocated for Prometheus. It is wise to reduce the retention size to provide
+a buffer, ensuring that older entries will be removed before the allocated storage
+for Prometheus becomes full.
+
+At present, we recommend setting the retention size to, at most, 80-85% of your
+allocated Prometheus disk space. This increases the likelihood that older entries
+will be removed prior to hitting any disk limitations.
+
 ## Remote storage integrations

 Prometheus's local storage is limited to a single node's scalability and durability.
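A quick sketch of that guideline as arithmetic (the 100GB allocation is hypothetical; the printed value would be passed to the size flag mentioned above):

	package main

	import "fmt"

	func main() {
		// Assume 100GB of disk is allocated to the Prometheus TSDB volume.
		const allocatedGB = 100.0
		// Cap retention at ~80% of it, leaving headroom so expired blocks
		// are deleted before the volume fills up.
		fmt.Printf("--storage.tsdb.retention.size=%.0fGB\n", allocatedGB*0.80)
		// Output: --storage.tsdb.retention.size=80GB
	}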
--- a/documentation/examples/remote_storage/go.mod
+++ b/documentation/examples/remote_storage/go.mod
@@ -1,6 +1,6 @@
 module github.com/prometheus/prometheus/documentation/examples/remote_storage

-go 1.21
+go 1.21.0

 require (
 	github.com/alecthomas/kingpin/v2 v2.4.0
@@ -10,7 +10,7 @@ require (
 	github.com/influxdata/influxdb v1.11.5
 	github.com/prometheus/client_golang v1.19.1
 	github.com/prometheus/common v0.55.0
-	github.com/prometheus/prometheus v0.52.1
+	github.com/prometheus/prometheus v0.53.1
 	github.com/stretchr/testify v1.9.0
 )
--- a/go.mod
+++ b/go.mod
@@ -1,22 +1,24 @@
 module github.com/prometheus/prometheus

-go 1.21
+go 1.21.0
+
+toolchain go1.22.5

 require (
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0
 	github.com/Code-Hex/go-generics-cache v1.5.1
 	github.com/KimMachineGun/automemlimit v0.6.1
 	github.com/alecthomas/kingpin/v2 v2.4.0
-	github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9
-	github.com/aws/aws-sdk-go v1.53.16
+	github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30
+	github.com/aws/aws-sdk-go v1.54.19
 	github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
 	github.com/cespare/xxhash/v2 v2.3.0
 	github.com/dennwc/varint v1.0.0
-	github.com/digitalocean/godo v1.117.0
-	github.com/docker/docker v26.1.3+incompatible
+	github.com/digitalocean/godo v1.118.0
+	github.com/docker/docker v27.0.3+incompatible
 	github.com/edsrzf/mmap-go v1.1.0
 	github.com/envoyproxy/go-control-plane v0.12.0
 	github.com/envoyproxy/protoc-gen-validate v1.0.4
@@ -29,75 +31,75 @@ require (
 	github.com/gogo/protobuf v1.3.2
 	github.com/golang/snappy v0.0.4
 	github.com/google/go-cmp v0.6.0
-	github.com/google/pprof v0.0.0-20240528025155-186aa0362fba
+	github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da
 	github.com/google/uuid v1.6.0
-	github.com/gophercloud/gophercloud v1.12.0
+	github.com/gophercloud/gophercloud v1.13.0
 	github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
 	github.com/grpc-ecosystem/grpc-gateway v1.16.0
-	github.com/hashicorp/consul/api v1.29.1
-	github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d
-	github.com/hetznercloud/hcloud-go/v2 v2.9.0
+	github.com/hashicorp/consul/api v1.29.2
+	github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3
+	github.com/hetznercloud/hcloud-go/v2 v2.10.2
 	github.com/ionos-cloud/sdk-go/v6 v6.1.11
 	github.com/json-iterator/go v1.1.12
-	github.com/klauspost/compress v1.17.8
+	github.com/klauspost/compress v1.17.9
 	github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
-	github.com/linode/linodego v1.35.0
-	github.com/miekg/dns v1.1.59
+	github.com/linode/linodego v1.37.0
+	github.com/miekg/dns v1.1.61
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
 	github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1
 	github.com/oklog/run v1.1.0
 	github.com/oklog/ulid v1.3.1
-	github.com/ovh/go-ovh v1.5.1
+	github.com/ovh/go-ovh v1.6.0
 	github.com/prometheus/alertmanager v0.27.0
 	github.com/prometheus/client_golang v1.19.1
 	github.com/prometheus/client_model v0.6.1
-	github.com/prometheus/common v0.54.0
+	github.com/prometheus/common v0.55.0
 	github.com/prometheus/common/assets v0.2.0
 	github.com/prometheus/common/sigv4 v0.1.0
 	github.com/prometheus/exporter-toolkit v0.11.0
-	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27
+	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29
 	github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
 	github.com/stretchr/testify v1.9.0
 	github.com/vultr/govultr/v2 v2.17.2
-	go.opentelemetry.io/collector/pdata v1.8.0
-	go.opentelemetry.io/collector/semconv v0.101.0
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0
-	go.opentelemetry.io/otel v1.27.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0
-	go.opentelemetry.io/otel/sdk v1.27.0
-	go.opentelemetry.io/otel/trace v1.27.0
+	go.opentelemetry.io/collector/pdata v1.12.0
+	go.opentelemetry.io/collector/semconv v0.105.0
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0
+	go.opentelemetry.io/otel v1.28.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0
+	go.opentelemetry.io/otel/sdk v1.28.0
+	go.opentelemetry.io/otel/trace v1.28.0
 	go.uber.org/atomic v1.11.0
 	go.uber.org/automaxprocs v1.5.3
 	go.uber.org/goleak v1.3.0
 	go.uber.org/multierr v1.11.0
-	golang.org/x/net v0.26.0
+	golang.org/x/net v0.27.0
 	golang.org/x/oauth2 v0.21.0
 	golang.org/x/sync v0.7.0
-	golang.org/x/sys v0.21.0
+	golang.org/x/sys v0.22.0
 	golang.org/x/text v0.16.0
 	golang.org/x/time v0.5.0
-	golang.org/x/tools v0.22.0
-	google.golang.org/api v0.183.0
-	google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157
-	google.golang.org/grpc v1.64.0
-	google.golang.org/protobuf v1.34.1
+	golang.org/x/tools v0.23.0
+	google.golang.org/api v0.188.0
+	google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d
+	google.golang.org/grpc v1.65.0
+	google.golang.org/protobuf v1.34.2
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
 	k8s.io/api v0.29.3
 	k8s.io/apimachinery v0.29.3
 	k8s.io/client-go v0.29.3
 	k8s.io/klog v1.0.0
-	k8s.io/klog/v2 v2.120.1
+	k8s.io/klog/v2 v2.130.1
 )

 require (
-	cloud.google.com/go/auth v0.5.1 // indirect
+	cloud.google.com/go/auth v0.7.0 // indirect
 	cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
-	cloud.google.com/go/compute/metadata v0.3.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect
+	cloud.google.com/go/compute/metadata v0.4.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
 	github.com/armon/go-metrics v0.4.1 // indirect
@@ -105,7 +107,7 @@ require (
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
 	github.com/cilium/ebpf v0.11.0 // indirect
-	github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 // indirect
+	github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect
 	github.com/containerd/cgroups/v3 v3.0.3 // indirect
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
@@ -119,7 +121,7 @@ require (
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/ghodss/yaml v1.0.0 // indirect
 	github.com/go-kit/kit v0.12.0 // indirect
-	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/analysis v0.22.2 // indirect
 	github.com/go-openapi/errors v0.22.0 // indirect
@@ -132,7 +134,7 @@ require (
 	github.com/go-resty/resty/v2 v2.13.1 // indirect
 	github.com/godbus/dbus/v5 v5.0.4 // indirect
 	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
-	github.com/golang/glog v1.2.0 // indirect
+	github.com/golang/glog v1.2.1 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect
@@ -140,7 +142,7 @@ require (
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/s2a-go v0.1.7 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
-	github.com/googleapis/gax-go/v2 v2.12.4 // indirect
+	github.com/googleapis/gax-go/v2 v2.12.5 // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
 	github.com/hashicorp/cronexpr v1.1.2 // indirect
@@ -176,20 +178,20 @@ require (
 	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/prometheus/procfs v0.12.0 // indirect
+	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/stretchr/objx v0.5.2 // indirect
 	github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
 	go.mongodb.org/mongo-driver v1.14.0 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/otel/metric v1.27.0 // indirect
-	go.opentelemetry.io/proto/otlp v1.2.0 // indirect
-	golang.org/x/crypto v0.24.0 // indirect
+	go.opentelemetry.io/otel/metric v1.28.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
+	golang.org/x/crypto v0.25.0 // indirect
 	golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
-	golang.org/x/mod v0.18.0 // indirect
-	golang.org/x/term v0.21.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
+	golang.org/x/mod v0.19.0 // indirect
+	golang.org/x/term v0.22.0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gotest.tools/v3 v3.0.3 // indirect
--- a/go.sum
+++ b/go.sum
@@ -12,8 +12,8 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
 cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
 cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
 cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw=
-cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s=
+cloud.google.com/go/auth v0.7.0 h1:kf/x9B3WTbBUHkC+1VS8wwwli9TzhSt0vSTVBmMR8Ts=
+cloud.google.com/go/auth v0.7.0/go.mod h1:D+WqdrpcjmiCgWrXmLLxOVq1GACoE36chW6KXoEvuIw=
 cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
 cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
@@ -22,8 +22,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
-cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.4.0 h1:vHzJCWaM4g8XIcm8kopr3XmDA4Gy/lblD3EhhSux05c=
+cloud.google.com/go/compute/metadata v0.4.0/go.mod h1:SIQh1Kkb4ZJ8zJ874fqVkslA29PRXuleyj6vOzlbK7M=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -36,12 +36,12 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=
@@ -75,8 +75,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs=
-github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
+github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg=
+github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
@@ -92,8 +92,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W
 github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
 github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc=
-github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI=
+github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
@@ -120,8 +120,8 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
 github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
-github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
+github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
+github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
 github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
@@ -143,14 +143,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc
 github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
 github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw=
-github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
+github.com/digitalocean/godo v1.118.0 h1:lkzGFQmACrVCp7UqH1sAi4JK/PWwlc5aaxubgorKmC4=
+github.com/digitalocean/godo v1.118.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
 github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
 github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
 github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
 github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
-github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
-github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE=
+github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -210,8 +210,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
 github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
 github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
-github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE9Vpj0=
@@ -251,8 +251,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69
 github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
 github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
-github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4=
+github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -319,8 +319,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
 github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20240528025155-186aa0362fba h1:ql1qNgCyOB7iAEk8JTNM+zJrgIbnyCKX/wdlyPufP5g=
-github.com/google/pprof v0.0.0-20240528025155-186aa0362fba/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
+github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g=
+github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
 github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
@@ -332,10 +332,10 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF
 github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg=
-github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI=
-github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g=
-github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
+github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA=
+github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E=
+github.com/gophercloud/gophercloud v1.13.0 h1:8iY9d1DAbzMW6Vok1AxbbK5ZaUjzMp0tdyt4fX9IeJ0=
+github.com/gophercloud/gophercloud v1.13.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@@ -353,10 +353,10 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
 github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
-github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc=
-github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI=
-github.com/hashicorp/consul/proto-public v0.6.1 h1:+uzH3olCrksXYWAYHKqK782CtK9scfqH+Unlw3UHhCg=
-github.com/hashicorp/consul/proto-public v0.6.1/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
+github.com/hashicorp/consul/api v1.29.2 h1:aYyRn8EdE2mSfG14S1+L9Qkjtz8RzmaWh6AcNGRNwPw=
+github.com/hashicorp/consul/api v1.29.2/go.mod h1:0YObcaLNDSbtlgzIRtmRXI1ZkeuK0trCBxwZQ4MYnIk=
+github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0=
+github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
 github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
 github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
 github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
@@ -396,8 +396,8 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
 github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
 github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
-github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
+github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -409,13 +409,13 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/
 github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
 github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
 github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
-github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc=
-github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
+github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1avVOnu2cfms8VAiD8lUq3vWI8mTocOXN/w=
+github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
 github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY=
-github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg=
+github.com/hetznercloud/hcloud-go/v2 v2.10.2 h1:9gyTUPhfNbfbS40Spgij5mV5k37bOZgt8iHKCbfGs5I=
+github.com/hetznercloud/hcloud-go/v2 v2.10.2/go.mod h1:xQ+8KhIS62W0D78Dpi57jsufWh844gUw1az5OUvaeq8=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -454,8 +454,8 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
-github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -472,8 +472,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
 github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
-github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do=
-github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0=
+github.com/linode/linodego v1.37.0 h1:B/2Spzv9jYXzKA+p+GD8fVCNJ7Wuw6P91ZDD9eCkkso=
+github.com/linode/linodego v1.37.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ=
 github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@@ -500,8 +500,8 @@ github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwU
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
 github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
-github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs=
-github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk=
+github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs=
+github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
 github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
 github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -573,8 +573,8 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS
 github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
 github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
 github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
-github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI=
-github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
+github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI=
+github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
 github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
@@ -625,8 +625,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
 github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8=
-github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ=
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
+github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
 github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
 github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
 github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
@@ -639,8 +639,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
 github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
-github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
@@ -650,8 +650,8 @@ github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 h1:BkTk4gynLjguayxrYxZoMZjBnAOh7ntQvUkOFmkMqPU=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY=
@@ -694,6 +694,7 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@@ -723,28 +724,28 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764=
-go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY=
-go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q=
-go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=
-go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
-go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY=
-go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
-go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
-go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI=
-go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A=
-go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
-go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
-go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94=
-go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
+go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimKItMWw46BmA=
+go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI=
+go.opentelemetry.io/collector/semconv v0.105.0 h1:8p6dZ3JfxFTjbY38d8xlQGB1TQ3nPUvs+D0RERniZ1g=
+go.opentelemetry.io/collector/semconv v0.105.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
+go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
+go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
+go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
+go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
+go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
+go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
|
||||
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
|
||||
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
|
@ -773,8 +774,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
|
|||
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
|
||||
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
|
||||
golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
|
||||
golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
|
@ -809,8 +810,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
|
||||
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
|
||||
golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
@ -856,8 +857,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
|||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
|
||||
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
|
||||
golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
|
||||
golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -946,16 +947,16 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
|
||||
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
|
||||
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
|
||||
golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
|
||||
golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -1025,8 +1026,8 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc
|
|||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
|
||||
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
|
||||
golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
|
||||
golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
@ -1046,8 +1047,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
|
|||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE=
|
||||
google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ=
|
||||
google.golang.org/api v0.188.0 h1:51y8fJ/b1AaaBRJr4yWm96fPcuxSo0JcegXE3DaHQHw=
|
||||
google.golang.org/api v0.188.0/go.mod h1:VR0d+2SIiWOYG3r/jdm7adPW9hI2aRv9ETOSCQ9Beag=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
@ -1084,10 +1085,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m
|
|||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b h1:04+jVzTs2XBnOZcPsLnmrTGqltqJbZQ1Ey26hjYdQQ0=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
|
@ -1106,8 +1107,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
|
|||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
|
||||
google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
|
||||
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
|
||||
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
@ -1119,8 +1120,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
|
|||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
|
||||
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
|
||||
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
|
|
@ -38,10 +38,10 @@ func (ls Labels) Bytes(buf []byte) []byte {
	b.WriteByte(labelSep)
	for i, l := range ls {
		if i > 0 {
			b.WriteByte(seps[0])
			b.WriteByte(sep)
		}
		b.WriteString(l.Name)
		b.WriteByte(seps[0])
		b.WriteByte(sep)
		b.WriteString(l.Value)
	}
	return b.Bytes()
@ -86,9 +86,9 @@ func (ls Labels) Hash() uint64 {
		}

		b = append(b, v.Name...)
		b = append(b, seps[0])
		b = append(b, sep)
		b = append(b, v.Value...)
		b = append(b, seps[0])
		b = append(b, sep)
	}
	return xxhash.Sum64(b)
}
@ -106,9 +106,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
			i++
		default:
			b = append(b, ls[i].Name...)
			b = append(b, seps[0])
			b = append(b, sep)
			b = append(b, ls[i].Value...)
			b = append(b, seps[0])
			b = append(b, sep)
			i++
			j++
		}
@ -130,9 +130,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
			continue
		}
		b = append(b, ls[i].Name...)
		b = append(b, seps[0])
		b = append(b, sep)
		b = append(b, ls[i].Value...)
		b = append(b, seps[0])
		b = append(b, sep)
	}
	return xxhash.Sum64(b), b
}
@ -151,10 +151,10 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte {
			i++
		default:
			if b.Len() > 1 {
				b.WriteByte(seps[0])
				b.WriteByte(sep)
			}
			b.WriteString(ls[i].Name)
			b.WriteByte(seps[0])
			b.WriteByte(sep)
			b.WriteString(ls[i].Value)
			i++
			j++
@ -177,10 +177,10 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte {
			continue
		}
		if b.Len() > 1 {
			b.WriteByte(seps[0])
			b.WriteByte(sep)
		}
		b.WriteString(ls[i].Name)
		b.WriteByte(seps[0])
		b.WriteByte(sep)
		b.WriteString(ls[i].Value)
	}
	return b.Bytes()

@ -29,10 +29,11 @@ const (
	BucketLabel = "le"
	InstanceName = "instance"

	labelSep = '\xfe'
	labelSep = '\xfe' // Used at beginning of `Bytes` return.
	sep      = '\xff' // Used between labels in `Bytes` and `Hash`.
)

var seps = []byte{'\xff'}
var seps = []byte{sep} // Used with Hash, which has no WriteByte method.

// Label is a key/value pair of strings.
type Label struct {
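
For context on the hunks above: `seps[0]` indexed into a one-element slice everywhere a single separator byte was needed, and the new named constant `sep` makes that intent explicit, while `seps` survives only for the hash fallback that writes through an `io.Writer`. The following is a minimal, self-contained sketch of the byte layout these constants produce; the `encode` helper and its label pairs are illustrative, not part of this diff:

package main

import (
	"bytes"
	"fmt"
)

const (
	labelSep = '\xfe' // Prefix of the serialized label set.
	sep      = '\xff' // Delimiter between label names and values.
)

// encode mirrors the shape of Labels.Bytes in the diff above:
// \xfe name \xff value \xff name \xff value ...
func encode(pairs [][2]string) []byte {
	var b bytes.Buffer
	b.WriteByte(labelSep)
	for i, p := range pairs {
		if i > 0 {
			b.WriteByte(sep)
		}
		b.WriteString(p[0])
		b.WriteByte(sep)
		b.WriteString(p[1])
	}
	return b.Bytes()
}

func main() {
	fmt.Printf("%q\n", encode([][2]string{{"job", "api"}, {"instance", "host:9090"}}))
	// Output: "\xfejob\xffapi\xffinstance\xffhost:9090"
}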

@ -146,13 +146,13 @@ func (ls Labels) Bytes(buf []byte) []byte {
	b := bytes.NewBuffer(buf[:0])
	for i := 0; i < len(ls.data); {
		if i > 0 {
			b.WriteByte(seps[0])
			b.WriteByte(sep)
		}
		var name, value string
		name, i = decodeString(ls.syms, ls.data, i)
		value, i = decodeString(ls.syms, ls.data, i)
		b.WriteString(name)
		b.WriteByte(seps[0])
		b.WriteByte(sep)
		b.WriteString(value)
	}
	return b.Bytes()
@ -201,9 +201,9 @@ func (ls Labels) Hash() uint64 {
		}

		b = append(b, name...)
		b = append(b, seps[0])
		b = append(b, sep)
		b = append(b, value...)
		b = append(b, seps[0])
		b = append(b, sep)
		pos = newPos
	}
	return xxhash.Sum64(b)
@ -226,9 +226,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
		}
		if name == names[j] {
			b = append(b, name...)
			b = append(b, seps[0])
			b = append(b, sep)
			b = append(b, value...)
			b = append(b, seps[0])
			b = append(b, sep)
		}
	}

@ -252,9 +252,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
			continue
		}
		b = append(b, name...)
		b = append(b, seps[0])
		b = append(b, sep)
		b = append(b, value...)
		b = append(b, seps[0])
		b = append(b, sep)
	}
	return xxhash.Sum64(b), b
}
@ -275,10 +275,10 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte {
		}
		if lName == names[j] {
			if b.Len() > 1 {
				b.WriteByte(seps[0])
				b.WriteByte(sep)
			}
			b.WriteString(lName)
			b.WriteByte(seps[0])
			b.WriteByte(sep)
			b.WriteString(lValue)
		}
		pos = newPos
@ -299,10 +299,10 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte {
		}
		if j == len(names) || lName != names[j] {
			if b.Len() > 1 {
				b.WriteByte(seps[0])
				b.WriteByte(sep)
			}
			b.WriteString(lName)
			b.WriteByte(seps[0])
			b.WriteByte(sep)
			b.WriteString(lValue)
		}
		pos = newPos

@ -112,9 +112,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
		}
		if name == names[j] {
			b = append(b, name...)
			b = append(b, seps[0])
			b = append(b, sep)
			b = append(b, value...)
			b = append(b, seps[0])
			b = append(b, sep)
		}
	}

@ -138,9 +138,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
			continue
		}
		b = append(b, name...)
		b = append(b, seps[0])
		b = append(b, sep)
		b = append(b, value...)
		b = append(b, seps[0])
		b = append(b, sep)
	}
	return xxhash.Sum64(b), b
}

@ -39,9 +39,9 @@ func StableHash(ls Labels) uint64 {
		}

		b = append(b, v.Name...)
		b = append(b, seps[0])
		b = append(b, sep)
		b = append(b, v.Value...)
		b = append(b, seps[0])
		b = append(b, sep)
	}
	return xxhash.Sum64(b)
}

@ -43,9 +43,9 @@ func StableHash(ls Labels) uint64 {
		}

		b = append(b, name...)
		b = append(b, seps[0])
		b = append(b, sep)
		b = append(b, value...)
		b = append(b, seps[0])
		b = append(b, sep)
		pos = newPos
	}
	return xxhash.Sum64(b)

@ -43,9 +43,9 @@ func StableHash(ls Labels) uint64 {
		}

		b = append(b, v.Name...)
		b = append(b, seps[0])
		b = append(b, sep)
		b = append(b, v.Value...)
		b = append(b, seps[0])
		b = append(b, sep)
	}
	if h != nil {
		return h.Sum64()

@ -55,6 +55,11 @@ const (
	DefaultMaxSamplesPerQuery = 10000
)

type TBRun interface {
	testing.TB
	Run(string, func(*testing.T)) bool
}

var testStartTime = time.Unix(0, 0).UTC()

// LoadedStorage returns storage with generated data using the provided load statements.
@ -100,7 +105,7 @@ func NewTestEngineWithOpts(tb testing.TB, opts promql.EngineOpts) *promql.Engine
}

// RunBuiltinTests runs an acceptance test suite against the provided engine.
func RunBuiltinTests(t *testing.T, engine promql.QueryEngine) {
func RunBuiltinTests(t TBRun, engine promql.QueryEngine) {
	t.Cleanup(func() { parser.EnableExperimentalFunctions = false })
	parser.EnableExperimentalFunctions = true
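
Widening `RunBuiltinTests` from `*testing.T` to the new `TBRun` interface lets any harness that can run named subtests drive the acceptance suite. A sketch of a downstream caller follows; the engine options shown are illustrative assumptions, not the canonical test setup:

package myengine_test

import (
	"testing"
	"time"

	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/promqltest"
)

// TestPromQLAcceptance reuses the built-in suite. *testing.T satisfies TBRun
// because it embeds testing.TB and T.Run has the required
// Run(string, func(*testing.T)) bool signature.
func TestPromQLAcceptance(t *testing.T) {
	engine := promql.NewEngine(promql.EngineOpts{
		MaxSamples:               promqltest.DefaultMaxSamplesPerQuery,
		Timeout:                  time.Minute,
		EnableAtModifier:         true,
		EnableNegativeOffset:     true,
		NoStepSubqueryIntervalFn: func(int64) int64 { return int64(time.Minute / time.Millisecond) },
	})
	promqltest.RunBuiltinTests(t, engine)
}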

191
promql/promqltest/testdata/histograms.test
vendored
@ -73,22 +73,32 @@ eval instant at 50m histogram_count(testhistogram3)
  {start="positive"} 110
  {start="negative"} 20

# Classic way of accessing the count still works.
eval instant at 50m testhistogram3_count
  testhistogram3_count{start="positive"} 110
  testhistogram3_count{start="negative"} 20

# Test histogram_sum.
eval instant at 50m histogram_sum(testhistogram3)
  {start="positive"} 330
  {start="negative"} 80

# Test histogram_avg.
# Classic way of accessing the sum still works.
eval instant at 50m testhistogram3_sum
  testhistogram3_sum{start="positive"} 330
  testhistogram3_sum{start="negative"} 80

# Test histogram_avg. This has no classic equivalent.
eval instant at 50m histogram_avg(testhistogram3)
  {start="positive"} 3
  {start="negative"} 4

# Test histogram_stddev.
# Test histogram_stddev. This has no classic equivalent.
eval instant at 50m histogram_stddev(testhistogram3)
  {start="positive"} 2.8189265757336734
  {start="negative"} 4.182715937754936

# Test histogram_stdvar.
# Test histogram_stdvar. This has no classic equivalent.
eval instant at 50m histogram_stdvar(testhistogram3)
  {start="positive"} 7.946347039377573
  {start="negative"} 17.495112615949154
@ -103,137 +113,282 @@ eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[5m]))
  {start="positive"} 0.6363636363636364
  {start="negative"} 0

# Test histogram_quantile.
# In the classic histogram, we can access the corresponding bucket (if
# it exists) and divide by the count to get the same result.

eval instant at 50m testhistogram3_bucket{le=".2"} / ignoring(le) testhistogram3_count
  {start="positive"} 0.6363636363636364

eval instant at 50m rate(testhistogram3_bucket{le=".2"}[5m]) / ignoring(le) rate(testhistogram3_count[5m])
  {start="positive"} 0.6363636363636364

# Test histogram_quantile, native and classic.

eval instant at 50m histogram_quantile(0, testhistogram3)
  {start="positive"} 0
  {start="negative"} -0.25

eval instant at 50m histogram_quantile(0, testhistogram3_bucket)
  {start="positive"} 0
  {start="negative"} -0.25

eval instant at 50m histogram_quantile(0.25, testhistogram3)
  {start="positive"} 0.055
  {start="negative"} -0.225

eval instant at 50m histogram_quantile(0.25, testhistogram3_bucket)
  {start="positive"} 0.055
  {start="negative"} -0.225

eval instant at 50m histogram_quantile(0.5, testhistogram3)
  {start="positive"} 0.125
  {start="negative"} -0.2

eval instant at 50m histogram_quantile(0.5, testhistogram3_bucket)
  {start="positive"} 0.125
  {start="negative"} -0.2

eval instant at 50m histogram_quantile(0.75, testhistogram3)
  {start="positive"} 0.45
  {start="negative"} -0.15

eval instant at 50m histogram_quantile(0.75, testhistogram3_bucket)
  {start="positive"} 0.45
  {start="negative"} -0.15

eval instant at 50m histogram_quantile(1, testhistogram3)
  {start="positive"} 1
  {start="negative"} -0.1

eval instant at 50m histogram_quantile(1, testhistogram3_bucket)
  {start="positive"} 1
  {start="negative"} -0.1

# Quantile too low.

eval_warn instant at 50m histogram_quantile(-0.1, testhistogram)
  {start="positive"} -Inf
  {start="negative"} -Inf

eval_warn instant at 50m histogram_quantile(-0.1, testhistogram_bucket)
  {start="positive"} -Inf
  {start="negative"} -Inf

# Quantile too high.

eval_warn instant at 50m histogram_quantile(1.01, testhistogram)
  {start="positive"} +Inf
  {start="negative"} +Inf

eval_warn instant at 50m histogram_quantile(1.01, testhistogram_bucket)
  {start="positive"} +Inf
  {start="negative"} +Inf

# Quantile invalid.

eval_warn instant at 50m histogram_quantile(NaN, testhistogram)
  {start="positive"} NaN
  {start="negative"} NaN

eval_warn instant at 50m histogram_quantile(NaN, testhistogram_bucket)
  {start="positive"} NaN
  {start="negative"} NaN

# Quantile value in lowest bucket.

eval instant at 50m histogram_quantile(0, testhistogram)
  {start="positive"} 0
  {start="negative"} -0.2

eval instant at 50m histogram_quantile(0, testhistogram_bucket)
  {start="positive"} 0
  {start="negative"} -0.2

# Quantile value in highest bucket.

eval instant at 50m histogram_quantile(1, testhistogram)
  {start="positive"} 1
  {start="negative"} 0.3

eval instant at 50m histogram_quantile(1, testhistogram_bucket)
  {start="positive"} 1
  {start="negative"} 0.3

# Finally some useful quantiles.

eval instant at 50m histogram_quantile(0.2, testhistogram)
  {start="positive"} 0.048
  {start="negative"} -0.2

eval instant at 50m histogram_quantile(0.2, testhistogram_bucket)
  {start="positive"} 0.048
  {start="negative"} -0.2

eval instant at 50m histogram_quantile(0.5, testhistogram)
  {start="positive"} 0.15
  {start="negative"} -0.15

eval instant at 50m histogram_quantile(0.5, testhistogram_bucket)
  {start="positive"} 0.15
  {start="negative"} -0.15

eval instant at 50m histogram_quantile(0.8, testhistogram)
  {start="positive"} 0.72
  {start="negative"} 0.3

eval instant at 50m histogram_quantile(0.8, testhistogram_bucket)
  {start="positive"} 0.72
  {start="negative"} 0.3

# More realistic with rates.

eval instant at 50m histogram_quantile(0.2, rate(testhistogram[5m]))
  {start="positive"} 0.048
  {start="negative"} -0.2

eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m]))
  {start="positive"} 0.048
  {start="negative"} -0.2

eval instant at 50m histogram_quantile(0.5, rate(testhistogram[5m]))
  {start="positive"} 0.15
  {start="negative"} -0.15

eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m]))
  {start="positive"} 0.15
  {start="negative"} -0.15

eval instant at 50m histogram_quantile(0.8, rate(testhistogram[5m]))
  {start="positive"} 0.72
  {start="negative"} 0.3

eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m]))
  {start="positive"} 0.72
  {start="negative"} 0.3

# Want results exactly in the middle of the bucket.

eval instant at 7m histogram_quantile(1./6., testhistogram2)
  {} 1

eval instant at 7m histogram_quantile(1./6., testhistogram2_bucket)
  {} 1

eval instant at 7m histogram_quantile(0.5, testhistogram2)
  {} 3

eval instant at 7m histogram_quantile(0.5, testhistogram2_bucket)
  {} 3

eval instant at 7m histogram_quantile(5./6., testhistogram2)
  {} 5

eval instant at 7m histogram_quantile(5./6., testhistogram2_bucket)
  {} 5

eval instant at 47m histogram_quantile(1./6., rate(testhistogram2[15m]))
  {} 1

eval instant at 47m histogram_quantile(1./6., rate(testhistogram2_bucket[15m]))
  {} 1

eval instant at 47m histogram_quantile(0.5, rate(testhistogram2[15m]))
  {} 3

eval instant at 47m histogram_quantile(0.5, rate(testhistogram2_bucket[15m]))
  {} 3

eval instant at 47m histogram_quantile(5./6., rate(testhistogram2[15m]))
  {} 5

eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m]))
  {} 5

# Aggregated histogram: Everything in one.
# Aggregated histogram: Everything in one. Note how native histograms
# don't require aggregation by le.

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])))
  {} 0.075

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le))
  {} 0.075

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])))
  {} 0.1277777777777778

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le))
  {} 0.1277777777777778

# Aggregated histogram: Everything in one. Now with avg, which does not change anything.

eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[5m])))
  {} 0.075

eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le))
  {} 0.075

eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[5m])))
  {} 0.12777777777777778

eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le))
  {} 0.12777777777777778

# Aggregated histogram: By instance.

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (instance))
  {instance="ins1"} 0.075
  {instance="ins2"} 0.075

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
  {instance="ins1"} 0.075
  {instance="ins2"} 0.075

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (instance))
  {instance="ins1"} 0.1333333333
  {instance="ins2"} 0.125

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
  {instance="ins1"} 0.1333333333
  {instance="ins2"} 0.125

# Aggregated histogram: By job.

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job))
  {job="job1"} 0.1
  {job="job2"} 0.0642857142857143

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
  {job="job1"} 0.1
  {job="job2"} 0.0642857142857143

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job))
  {job="job1"} 0.14
  {job="job2"} 0.1125

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
  {job="job1"} 0.14
  {job="job2"} 0.1125

# Aggregated histogram: By job and instance.

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job, instance))
  {instance="ins1", job="job1"} 0.11
  {instance="ins2", job="job1"} 0.09
  {instance="ins1", job="job2"} 0.06
  {instance="ins2", job="job2"} 0.0675

eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
  {instance="ins1", job="job1"} 0.11
  {instance="ins2", job="job1"} 0.09
  {instance="ins1", job="job2"} 0.06
  {instance="ins2", job="job2"} 0.0675

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job, instance))
  {instance="ins1", job="job1"} 0.15
  {instance="ins2", job="job1"} 0.1333333333333333
  {instance="ins1", job="job2"} 0.1
  {instance="ins2", job="job2"} 0.1166666666666667

eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
  {instance="ins1", job="job1"} 0.15
  {instance="ins2", job="job1"} 0.1333333333333333
@ -241,18 +396,32 @@ eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bu
  {instance="ins2", job="job2"} 0.1166666666666667

# The unaggregated histogram for comparison. Same result as the previous one.

eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[5m]))
  {instance="ins1", job="job1"} 0.11
  {instance="ins2", job="job1"} 0.09
  {instance="ins1", job="job2"} 0.06
  {instance="ins2", job="job2"} 0.0675

eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m]))
  {instance="ins1", job="job1"} 0.11
  {instance="ins2", job="job1"} 0.09
  {instance="ins1", job="job2"} 0.06
  {instance="ins2", job="job2"} 0.0675

eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[5m]))
  {instance="ins1", job="job1"} 0.15
  {instance="ins2", job="job1"} 0.13333333333333333
  {instance="ins1", job="job2"} 0.1
  {instance="ins2", job="job2"} 0.11666666666666667

eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m]))
  {instance="ins1", job="job1"} 0.15
  {instance="ins2", job="job1"} 0.13333333333333333
  {instance="ins1", job="job2"} 0.1
  {instance="ins2", job="job2"} 0.11666666666666667

# All NHCBs summed into one.
eval instant at 50m sum(request_duration_seconds)
  {} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}}

@ -303,11 +472,13 @@ load_with_nhcb 5m
eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m]))
  {instance="ins1", job="job1"} NaN

# Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set
# Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set.
# https://github.com/prometheus/prometheus/issues/9910
load_with_nhcb 5m
  request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
  request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10
  request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10
  request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
  request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10
  request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10

eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket$"})
eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket"})

eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*"})

@ -621,14 +621,12 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
		}
	}

	// If the rule has no dependencies, it can run concurrently because no other rules in this group depend on its output.
	// Try to run concurrently if there are slots available.
	if ctrl := g.concurrencyController; isRuleEligibleForConcurrentExecution(rule) && ctrl.Allow() {
	if ctrl := g.concurrencyController; ctrl.Allow(ctx, g, rule) {
		wg.Add(1)

		go eval(i, rule, func() {
			wg.Done()
			ctrl.Done()
			ctrl.Done(ctx)
		})
	} else {
		eval(i, rule, nil)
@ -1094,7 +1092,3 @@ func buildDependencyMap(rules []Rule) dependencyMap {

	return dependencies
}

func isRuleEligibleForConcurrentExecution(rule Rule) bool {
	return rule.NoDependentRules() && rule.NoDependencyRules()
}

@ -457,67 +457,47 @@ func (c ruleDependencyController) AnalyseRules(rules []Rule) {
// Its purpose is to bound the amount of concurrency in rule evaluations to avoid overwhelming the Prometheus
// server with additional query load. Concurrency is controlled globally, not on a per-group basis.
type RuleConcurrencyController interface {
	// Allow determines whether any concurrent evaluation slots are available.
	// If Allow() returns true, then Done() must be called to release the acquired slot.
	Allow() bool
	// Allow determines if the given rule is allowed to be evaluated concurrently.
	// If Allow() returns true, then Done() must be called to release the acquired slot and corresponding cleanup is done.
	// It is important that both *Group and Rule are not retained and are only used for the duration of the call.
	Allow(ctx context.Context, group *Group, rule Rule) bool

	// Done releases a concurrent evaluation slot.
	Done()
	Done(ctx context.Context)
}

// concurrentRuleEvalController holds a weighted semaphore which controls the concurrent evaluation of rules.
type concurrentRuleEvalController struct {
	sema *semaphore.Weighted
	depMapsMu sync.Mutex
	depMaps map[*Group]dependencyMap
	sema *semaphore.Weighted
}

func newRuleConcurrencyController(maxConcurrency int64) RuleConcurrencyController {
	return &concurrentRuleEvalController{
		sema: semaphore.NewWeighted(maxConcurrency),
		depMaps: map[*Group]dependencyMap{},
		sema: semaphore.NewWeighted(maxConcurrency),
	}
}

func (c *concurrentRuleEvalController) RuleEligible(g *Group, r Rule) bool {
	c.depMapsMu.Lock()
	defer c.depMapsMu.Unlock()

	depMap, found := c.depMaps[g]
	if !found {
		depMap = buildDependencyMap(g.rules)
		c.depMaps[g] = depMap
func (c *concurrentRuleEvalController) Allow(_ context.Context, _ *Group, rule Rule) bool {
	// To allow a rule to be executed concurrently, we need 3 conditions:
	// 1. The rule must not have any rules that depend on it.
	// 2. The rule itself must not depend on any other rules.
	// 3. If 1 & 2 are true, then and only then we should try to acquire the concurrency slot.
	if rule.NoDependentRules() && rule.NoDependencyRules() {
		return c.sema.TryAcquire(1)
	}

	return depMap.isIndependent(r)
	return false
}

func (c *concurrentRuleEvalController) Allow() bool {
	return c.sema.TryAcquire(1)
}

func (c *concurrentRuleEvalController) Done() {
func (c *concurrentRuleEvalController) Done(_ context.Context) {
	c.sema.Release(1)
}

func (c *concurrentRuleEvalController) Invalidate() {
	c.depMapsMu.Lock()
	defer c.depMapsMu.Unlock()

	// Clear out the memoized dependency maps because some or all groups may have been updated.
	c.depMaps = map[*Group]dependencyMap{}
}

// sequentialRuleEvalController is a RuleConcurrencyController that runs every rule sequentially.
type sequentialRuleEvalController struct{}

func (c sequentialRuleEvalController) RuleEligible(_ *Group, _ Rule) bool {
func (c sequentialRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) bool {
	return false
}

func (c sequentialRuleEvalController) Allow() bool {
	return false
}

func (c sequentialRuleEvalController) Done() {}
func (c sequentialRuleEvalController) Invalidate() {}
func (c sequentialRuleEvalController) Done(_ context.Context) {}
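
Because `Allow` now receives the context, group, and rule directly, an out-of-tree implementation can make per-rule decisions without the memoized dependency maps the old controller kept. A sketch of a wrapper that delegates to an inner controller and merely logs each decision; the wrapper type is illustrative, not part of this change:

package example

import (
	"context"
	"log"

	"github.com/prometheus/prometheus/rules"
)

// loggingController wraps any RuleConcurrencyController and records whether
// each rule was admitted to a concurrency slot.
type loggingController struct {
	inner rules.RuleConcurrencyController
}

func (c *loggingController) Allow(ctx context.Context, g *rules.Group, r rules.Rule) bool {
	ok := c.inner.Allow(ctx, g, r)
	log.Printf("group=%s rule=%s concurrent=%t", g.Name(), r.Name(), ok)
	return ok
}

func (c *loggingController) Done(ctx context.Context) {
	c.inner.Done(ctx)
}

A controller like this would be installed through the rules manager options (the RuleConcurrencyController field of ManagerOptions, assuming the default wiring shown in this diff).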

@ -24,7 +24,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
      - name: Install Go
        uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
        with:

@ -136,6 +136,11 @@ func (b *MemoizedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHist
	return b.it.AtFloatHistogram(nil)
}

// AtT returns the timestamp of the current element of the iterator.
func (b *MemoizedSeriesIterator) AtT() int64 {
	return b.it.AtT()
}

// Err returns the last encountered error.
func (b *MemoizedSeriesIterator) Err() error {
	return b.it.Err()
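
A small sketch of what the new accessor enables: callers that only need the current timestamp no longer have to branch on the value type before choosing between At and AtFloatHistogram. The helper below is illustrative, not part of this diff:

package example

import (
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// timestamps collects sample timestamps from a memoized iterator using AtT,
// which is valid for float and histogram samples alike.
func timestamps(it *storage.MemoizedSeriesIterator) []int64 {
	var ts []int64
	for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
		ts = append(ts, it.AtT())
	}
	return ts
}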

@ -29,13 +29,15 @@ func TestMemoizedSeriesIterator(t *testing.T) {
	sampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram) {
		if efh == nil {
			ts, v := it.At()
			require.Equal(t, ets, ts, "timestamp mismatch")
			require.Equal(t, ev, v, "value mismatch")
			require.Equal(t, ets, ts, "At() timestamp mismatch")
			require.Equal(t, ev, v, "At() value mismatch")
		} else {
			ts, fh := it.AtFloatHistogram()
			require.Equal(t, ets, ts, "timestamp mismatch")
			require.Equal(t, efh, fh, "histogram mismatch")
			require.Equal(t, ets, ts, "AtFloatHistogram() timestamp mismatch")
			require.Equal(t, efh, fh, "AtFloatHistogram() histogram mismatch")
		}

		require.Equal(t, ets, it.AtT(), "AtT() timestamp mismatch")
	}
	prevSampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram, eok bool) {
		ts, v, fh, ok := it.PeekPrev()
@ -45,25 +45,24 @@ type mergeGenericQuerier struct {
//
// In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used.
func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier {
	primaries = filterQueriers(primaries)
	secondaries = filterQueriers(secondaries)

	switch {
	case len(primaries)+len(secondaries) == 0:
	case len(primaries) == 0 && len(secondaries) == 0:
		return noopQuerier{}
	case len(primaries) == 1 && len(secondaries) == 0:
		return primaries[0]
	case len(primaries) == 0 && len(secondaries) == 1:
		return secondaries[0]
		return &querierAdapter{newSecondaryQuerierFrom(secondaries[0])}
	}

	queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries))
	for _, q := range primaries {
		if _, ok := q.(noopQuerier); !ok && q != nil {
			queriers = append(queriers, newGenericQuerierFrom(q))
		}
		queriers = append(queriers, newGenericQuerierFrom(q))
	}
	for _, q := range secondaries {
		if _, ok := q.(noopQuerier); !ok && q != nil {
			queriers = append(queriers, newSecondaryQuerierFrom(q))
		}
		queriers = append(queriers, newSecondaryQuerierFrom(q))
	}

	concurrentSelect := false
@ -77,31 +76,40 @@ func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMer
	}}
}

func filterQueriers(qs []Querier) []Querier {
	ret := make([]Querier, 0, len(qs))
	for _, q := range qs {
		if _, ok := q.(noopQuerier); !ok && q != nil {
			ret = append(ret, q)
		}
	}
	return ret
}

// NewMergeChunkQuerier returns a new Chunk Querier that merges results of given primary and secondary chunk queriers.
// See NewFanout commentary to learn more about primary vs secondary differences.
//
// In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used.
// TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. Split it: https://github.com/prometheus/tsdb/issues/670
func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier {
	primaries = filterChunkQueriers(primaries)
	secondaries = filterChunkQueriers(secondaries)

	switch {
	case len(primaries) == 0 && len(secondaries) == 0:
		return noopChunkQuerier{}
	case len(primaries) == 1 && len(secondaries) == 0:
		return primaries[0]
	case len(primaries) == 0 && len(secondaries) == 1:
		return secondaries[0]
		return &chunkQuerierAdapter{newSecondaryQuerierFromChunk(secondaries[0])}
	}

	queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries))
	for _, q := range primaries {
		if _, ok := q.(noopChunkQuerier); !ok && q != nil {
			queriers = append(queriers, newGenericQuerierFromChunk(q))
		}
		queriers = append(queriers, newGenericQuerierFromChunk(q))
	}
	for _, querier := range secondaries {
		if _, ok := querier.(noopChunkQuerier); !ok && querier != nil {
			queriers = append(queriers, newSecondaryQuerierFromChunk(querier))
		}
	for _, q := range secondaries {
		queriers = append(queriers, newSecondaryQuerierFromChunk(q))
	}

	concurrentSelect := false
@ -115,6 +123,16 @@ func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn Vertica
	}}
}

func filterChunkQueriers(qs []ChunkQuerier) []ChunkQuerier {
	ret := make([]ChunkQuerier, 0, len(qs))
	for _, q := range qs {
		if _, ok := q.(noopChunkQuerier); !ok && q != nil {
			ret = append(ret, q)
		}
	}
	return ret
}

// Select returns a set of series that matches the given label matchers.
func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
	seriesSets := make([]genericSeriesSet, 0, len(q.queriers))
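
The net effect of the refactor above: nil and noop queriers are stripped up front by `filterQueriers`/`filterChunkQueriers`, and the single-secondary shortcut now wraps the querier so its failures are still downgraded to warnings instead of hard errors. A caller-side sketch; the fan-in function and its arguments are illustrative:

package example

import (
	"github.com/prometheus/prometheus/storage"
)

// fanIn merges one authoritative local querier with best-effort remotes.
// Nil entries in remotes are fine: NewMergeQuerier filters them out, and a
// single remaining secondary is wrapped so its errors surface as warnings.
func fanIn(local storage.Querier, remotes []storage.Querier) storage.Querier {
	return storage.NewMergeQuerier(
		[]storage.Querier{local}, // primary: errors fail the query
		remotes,                  // secondaries: errors become warnings
		storage.ChainedSeriesMerge,
	)
}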
|
|
|
@ -912,9 +912,23 @@ func TestConcatenatingChunkIterator(t *testing.T) {
|
|||
}
|
||||
|
||||
type mockQuerier struct {
|
||||
LabelQuerier
|
||||
mtx sync.Mutex
|
||||
|
||||
toReturn []Series
|
||||
toReturn []Series // Response for Select.
|
||||
|
||||
closed bool
|
||||
labelNamesCalls int
|
||||
labelNamesRequested []labelNameRequest
|
||||
sortedSeriesRequested []bool
|
||||
|
||||
resp []string // Response for LabelNames and LabelValues; turned into Select response if toReturn is not supplied.
|
||||
warnings annotations.Annotations
|
||||
err error
|
||||
}
|
||||
|
||||
type labelNameRequest struct {
|
||||
name string
|
||||
matchers []*labels.Matcher
|
||||
}
|
||||
|
||||
type seriesByLabel []Series
|
||||
|
@ -924,13 +938,47 @@ func (a seriesByLabel) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
|||
func (a seriesByLabel) Less(i, j int) bool { return labels.Compare(a[i].Labels(), a[j].Labels()) < 0 }
|
||||
|
||||
func (m *mockQuerier) Select(_ context.Context, sortSeries bool, _ *SelectHints, _ ...*labels.Matcher) SeriesSet {
|
||||
cpy := make([]Series, len(m.toReturn))
|
||||
copy(cpy, m.toReturn)
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
m.sortedSeriesRequested = append(m.sortedSeriesRequested, sortSeries)
|
||||
|
||||
var ret []Series
|
||||
if len(m.toReturn) > 0 {
|
||||
ret = make([]Series, len(m.toReturn))
|
||||
copy(ret, m.toReturn)
|
||||
} else if len(m.resp) > 0 {
|
||||
ret = make([]Series, 0, len(m.resp))
|
||||
for _, l := range m.resp {
|
||||
ret = append(ret, NewListSeries(labels.FromStrings("test", l), nil))
|
||||
}
|
||||
}
|
||||
if sortSeries {
|
||||
sort.Sort(seriesByLabel(cpy))
|
||||
sort.Sort(seriesByLabel(ret))
|
||||
}
|
||||
|
||||
return NewMockSeriesSet(cpy...)
|
||||
return &mockSeriesSet{idx: -1, series: ret, warnings: m.warnings, err: m.err}
|
||||
}
|
||||
|
||||
func (m *mockQuerier) LabelValues(_ context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
|
||||
m.mtx.Lock()
|
||||
m.labelNamesRequested = append(m.labelNamesRequested, labelNameRequest{
|
||||
name: name,
|
||||
matchers: matchers,
|
||||
})
|
||||
m.mtx.Unlock()
|
||||
return m.resp, m.warnings, m.err
|
||||
}
|
||||
|
||||
func (m *mockQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
|
||||
m.mtx.Lock()
|
||||
m.labelNamesCalls++
|
||||
m.mtx.Unlock()
|
||||
return m.resp, m.warnings, m.err
|
||||
}
|
||||
|
||||
func (m *mockQuerier) Close() error {
|
||||
m.closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
type mockChunkQuerier struct {
|
||||
|
@ -960,6 +1008,9 @@ func (m *mockChunkQuerier) Select(_ context.Context, sortSeries bool, _ *SelectH
|
|||
type mockSeriesSet struct {
|
||||
idx int
|
||||
series []Series
|
||||
|
||||
warnings annotations.Annotations
|
||||
err error
|
||||
}
|
||||
|
||||
func NewMockSeriesSet(series ...Series) SeriesSet {
|
||||
|
@ -970,15 +1021,18 @@ func NewMockSeriesSet(series ...Series) SeriesSet {
|
|||
}
|
||||
|
||||
func (m *mockSeriesSet) Next() bool {
|
||||
if m.err != nil {
|
||||
return false
|
||||
}
|
||||
m.idx++
|
||||
return m.idx < len(m.series)
|
||||
}
|
||||
|
||||
func (m *mockSeriesSet) At() Series { return m.series[m.idx] }
|
||||
|
||||
func (m *mockSeriesSet) Err() error { return nil }
|
||||
func (m *mockSeriesSet) Err() error { return m.err }
|
||||
|
||||
func (m *mockSeriesSet) Warnings() annotations.Annotations { return nil }
|
||||
func (m *mockSeriesSet) Warnings() annotations.Annotations { return m.warnings }
|
||||
|
||||
type mockChunkSeriesSet struct {
|
||||
idx int
|
||||
|
@ -1336,105 +1390,44 @@ func BenchmarkMergeSeriesSet(b *testing.B) {
|
|||
}
|
||||
}
|
||||
|
||||
type mockGenericQuerier struct {
|
||||
mtx sync.Mutex
|
||||
|
||||
closed bool
|
||||
labelNamesCalls int
|
||||
labelNamesRequested []labelNameRequest
|
||||
sortedSeriesRequested []bool
|
||||
|
||||
resp []string
|
||||
warnings annotations.Annotations
|
||||
err error
|
||||
}
|
||||
|
||||
type labelNameRequest struct {
|
||||
name string
|
||||
matchers []*labels.Matcher
|
||||
}
|
||||
|
||||
func (m *mockGenericQuerier) Select(_ context.Context, b bool, _ *SelectHints, _ ...*labels.Matcher) genericSeriesSet {
|
||||
m.mtx.Lock()
|
||||
m.sortedSeriesRequested = append(m.sortedSeriesRequested, b)
|
||||
m.mtx.Unlock()
|
||||
return &mockGenericSeriesSet{resp: m.resp, warnings: m.warnings, err: m.err}
|
||||
}
|
||||
|
||||
func (m *mockGenericQuerier) LabelValues(_ context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
|
||||
m.mtx.Lock()
|
||||
m.labelNamesRequested = append(m.labelNamesRequested, labelNameRequest{
|
||||
name: name,
|
||||
matchers: matchers,
|
||||
})
|
||||
m.mtx.Unlock()
|
||||
return m.resp, m.warnings, m.err
|
||||
}
|
||||
|
||||
func (m *mockGenericQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
|
||||
m.mtx.Lock()
|
||||
m.labelNamesCalls++
|
||||
m.mtx.Unlock()
|
||||
return m.resp, m.warnings, m.err
|
||||
}
|
||||
|
||||
func (m *mockGenericQuerier) Close() error {
|
||||
m.closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
type mockGenericSeriesSet struct {
|
||||
resp []string
|
||||
warnings annotations.Annotations
|
||||
err error
|
||||
|
||||
curr int
|
||||
}
|
||||
|
||||
func (m *mockGenericSeriesSet) Next() bool {
|
||||
if m.err != nil {
|
||||
return false
|
||||
func visitMockQueriers(t *testing.T, qr Querier, f func(t *testing.T, q *mockQuerier)) int {
|
||||
count := 0
|
||||
switch x := qr.(type) {
|
||||
case *mockQuerier:
|
||||
count++
|
||||
f(t, x)
|
||||
case *querierAdapter:
|
||||
count += visitMockQueriersInGenericQuerier(t, x.genericQuerier, f)
|
||||
}
|
||||
if m.curr >= len(m.resp) {
|
||||
return false
|
||||
return count
|
||||
}
|
||||
|
||||
func visitMockQueriersInGenericQuerier(t *testing.T, g genericQuerier, f func(t *testing.T, q *mockQuerier)) int {
|
||||
count := 0
|
||||
switch x := g.(type) {
|
||||
case *mergeGenericQuerier:
|
||||
for _, q := range x.queriers {
|
||||
count += visitMockQueriersInGenericQuerier(t, q, f)
|
||||
}
|
||||
case *genericQuerierAdapter:
|
||||
// Visitor for chunkQuerier not implemented.
|
||||
count += visitMockQueriers(t, x.q, f)
|
||||
case *secondaryQuerier:
|
||||
count += visitMockQueriersInGenericQuerier(t, x.genericQuerier, f)
|
||||
}
|
||||
m.curr++
|
||||
return true
|
||||
return count
|
||||
}
|
||||
|
||||
func (m *mockGenericSeriesSet) Err() error { return m.err }
|
||||
func (m *mockGenericSeriesSet) Warnings() annotations.Annotations { return m.warnings }
|
||||
|
||||
func (m *mockGenericSeriesSet) At() Labels {
|
||||
return mockLabels(m.resp[m.curr-1])
|
||||
}
|
||||
|
||||
type mockLabels string
|
||||
|
||||
func (l mockLabels) Labels() labels.Labels {
|
||||
return labels.FromStrings("test", string(l))
|
||||
}
|
||||
|
||||
func unwrapMockGenericQuerier(t *testing.T, qr genericQuerier) *mockGenericQuerier {
|
||||
m, ok := qr.(*mockGenericQuerier)
|
||||
if !ok {
|
||||
s, ok := qr.(*secondaryQuerier)
|
||||
require.True(t, ok, "expected secondaryQuerier got something else")
|
||||
m, ok = s.genericQuerier.(*mockGenericQuerier)
|
||||
require.True(t, ok, "expected mockGenericQuerier got something else")
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
func TestMergeQuerierWithSecondaries_ErrorHandling(t *testing.T) {
    var (
        errStorage  = errors.New("storage error")
        warnStorage = errors.New("storage warning")
        ctx         = context.Background()
    )
    for _, tcase := range []struct {
        name     string
        queriers []genericQuerier
        name        string
        primaries   []Querier
        secondaries []Querier

        expectedSelectsSeries []labels.Labels
        expectedLabels        []string

@@ -1443,10 +1436,8 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
        expectedErrs [4]error
    }{
        {
            // NewMergeQuerier will not create a mergeGenericQuerier
            // with just one querier inside, but we can test it anyway.
            name:     "one successful primary querier",
            queriers: []genericQuerier{&mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}},
            name:      "one successful primary querier",
            primaries: []Querier{&mockQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}},
            expectedSelectsSeries: []labels.Labels{
                labels.FromStrings("test", "a"),
                labels.FromStrings("test", "b"),

@@ -1455,9 +1446,9 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
        },
        {
            name: "multiple successful primary queriers",
            queriers: []genericQuerier{
                &mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil},
                &mockGenericQuerier{resp: []string{"b", "c"}, warnings: nil, err: nil},
            primaries: []Querier{
                &mockQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil},
                &mockQuerier{resp: []string{"b", "c"}, warnings: nil, err: nil},
            },
            expectedSelectsSeries: []labels.Labels{
                labels.FromStrings("test", "a"),

@@ -1468,15 +1459,17 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
        },
        {
            name:     "one failed primary querier",
            queriers: []genericQuerier{&mockGenericQuerier{warnings: nil, err: errStorage}},
            primaries: []Querier{&mockQuerier{warnings: nil, err: errStorage}},
            expectedErrs: [4]error{errStorage, errStorage, errStorage, errStorage},
        },
        {
            name: "one successful primary querier with successful secondaries",
            queriers: []genericQuerier{
                &mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil},
                &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: nil}},
                &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: nil}},
            primaries: []Querier{
                &mockQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil},
            },
            secondaries: []Querier{
                &mockQuerier{resp: []string{"b"}, warnings: nil, err: nil},
                &mockQuerier{resp: []string{"c"}, warnings: nil, err: nil},
            },
            expectedSelectsSeries: []labels.Labels{
                labels.FromStrings("test", "a"),

@@ -1487,10 +1480,12 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
        },
        {
            name: "one successful primary querier with empty response and successful secondaries",
            queriers: []genericQuerier{
                &mockGenericQuerier{resp: []string{}, warnings: nil, err: nil},
                &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: nil}},
                &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: nil}},
            primaries: []Querier{
                &mockQuerier{resp: []string{}, warnings: nil, err: nil},
            },
            secondaries: []Querier{
                &mockQuerier{resp: []string{"b"}, warnings: nil, err: nil},
                &mockQuerier{resp: []string{"c"}, warnings: nil, err: nil},
            },
            expectedSelectsSeries: []labels.Labels{
                labels.FromStrings("test", "b"),

@@ -1500,19 +1495,42 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
        },
        {
            name: "one failed primary querier with successful secondaries",
            queriers: []genericQuerier{
                &mockGenericQuerier{warnings: nil, err: errStorage},
                &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: nil}},
                &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: nil}},
            primaries: []Querier{
                &mockQuerier{warnings: nil, err: errStorage},
            },
            secondaries: []Querier{
                &mockQuerier{resp: []string{"b"}, warnings: nil, err: nil},
                &mockQuerier{resp: []string{"c"}, warnings: nil, err: nil},
            },
            expectedErrs: [4]error{errStorage, errStorage, errStorage, errStorage},
        },
        {
            name:      "nil primary querier with failed secondary",
            primaries: nil,
            secondaries: []Querier{
                &mockQuerier{resp: []string{"b"}, warnings: nil, err: errStorage},
            },
            expectedLabels:   []string{},
            expectedWarnings: annotations.New().Add(errStorage),
        },
        {
            name:      "nil primary querier with two failed secondaries",
            primaries: nil,
            secondaries: []Querier{
                &mockQuerier{resp: []string{"b"}, warnings: nil, err: errStorage},
                &mockQuerier{resp: []string{"c"}, warnings: nil, err: errStorage},
            },
            expectedLabels:   []string{},
            expectedWarnings: annotations.New().Add(errStorage),
        },
        {
            name: "one successful primary querier with failed secondaries",
            queriers: []genericQuerier{
                &mockGenericQuerier{resp: []string{"a"}, warnings: nil, err: nil},
                &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: errStorage}},
                &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: errStorage}},
            primaries: []Querier{
                &mockQuerier{resp: []string{"a"}, warnings: nil, err: nil},
            },
            secondaries: []Querier{
                &mockQuerier{resp: []string{"b"}, warnings: nil, err: errStorage},
                &mockQuerier{resp: []string{"c"}, warnings: nil, err: errStorage},
            },
            expectedSelectsSeries: []labels.Labels{
                labels.FromStrings("test", "a"),

@@ -1522,9 +1540,11 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
        },
        {
            name: "successful queriers with warnings",
            queriers: []genericQuerier{
                &mockGenericQuerier{resp: []string{"a"}, warnings: annotations.New().Add(warnStorage), err: nil},
                &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: annotations.New().Add(warnStorage), err: nil}},
            primaries: []Querier{
                &mockQuerier{resp: []string{"a"}, warnings: annotations.New().Add(warnStorage), err: nil},
            },
            secondaries: []Querier{
                &mockQuerier{resp: []string{"b"}, warnings: annotations.New().Add(warnStorage), err: nil},
            },
            expectedSelectsSeries: []labels.Labels{
                labels.FromStrings("test", "a"),

@@ -1535,10 +1555,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
        },
    } {
        t.Run(tcase.name, func(t *testing.T) {
            q := &mergeGenericQuerier{
                queriers: tcase.queriers,
                mergeFn:  func(l ...Labels) Labels { return l[0] },
            }
            q := NewMergeQuerier(tcase.primaries, tcase.secondaries, func(s ...Series) Series { return s[0] })

            t.Run("Select", func(t *testing.T) {
                res := q.Select(context.Background(), false, nil)

@@ -1551,65 +1568,70 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
                require.ErrorIs(t, res.Err(), tcase.expectedErrs[0], "expected error doesn't match")
                require.Equal(t, tcase.expectedSelectsSeries, lbls)

                for _, qr := range q.queriers {
                    m := unwrapMockGenericQuerier(t, qr)
                    // mergeGenericQuerier forces all Selects to be sorted.
                    require.Equal(t, []bool{true}, m.sortedSeriesRequested)
                }
                n := visitMockQueriers(t, q, func(t *testing.T, m *mockQuerier) {
                    // Single queries should be unsorted; merged queries sorted.
                    exp := len(tcase.primaries)+len(tcase.secondaries) > 1
                    require.Equal(t, []bool{exp}, m.sortedSeriesRequested)
                })
                // Check we visited all queriers.
                require.Equal(t, len(tcase.primaries)+len(tcase.secondaries), n)
            })
            t.Run("LabelNames", func(t *testing.T) {
                res, w, err := q.LabelNames(ctx, nil)
                require.Subset(t, tcase.expectedWarnings, w)
                require.ErrorIs(t, err, tcase.expectedErrs[1], "expected error doesn't match")
                require.Equal(t, tcase.expectedLabels, res)
                requireEqualSlice(t, tcase.expectedLabels, res)

                if err != nil {
                    return
                }
                for _, qr := range q.queriers {
                    m := unwrapMockGenericQuerier(t, qr)

                visitMockQueriers(t, q, func(t *testing.T, m *mockQuerier) {
                    require.Equal(t, 1, m.labelNamesCalls)
                }
                })
            })
            t.Run("LabelValues", func(t *testing.T) {
                res, w, err := q.LabelValues(ctx, "test", nil)
                require.Subset(t, tcase.expectedWarnings, w)
                require.ErrorIs(t, err, tcase.expectedErrs[2], "expected error doesn't match")
                require.Equal(t, tcase.expectedLabels, res)
                requireEqualSlice(t, tcase.expectedLabels, res)

                if err != nil {
                    return
                }
                for _, qr := range q.queriers {
                    m := unwrapMockGenericQuerier(t, qr)

                visitMockQueriers(t, q, func(t *testing.T, m *mockQuerier) {
                    require.Equal(t, []labelNameRequest{{name: "test"}}, m.labelNamesRequested)
                }
                })
            })
            t.Run("LabelValuesWithMatchers", func(t *testing.T) {
                matcher := labels.MustNewMatcher(labels.MatchEqual, "otherLabel", "someValue")
                res, w, err := q.LabelValues(ctx, "test2", nil, matcher)
                require.Subset(t, tcase.expectedWarnings, w)
                require.ErrorIs(t, err, tcase.expectedErrs[3], "expected error doesn't match")
                require.Equal(t, tcase.expectedLabels, res)
                requireEqualSlice(t, tcase.expectedLabels, res)

                if err != nil {
                    return
                }
                for _, qr := range q.queriers {
                    m := unwrapMockGenericQuerier(t, qr)

                visitMockQueriers(t, q, func(t *testing.T, m *mockQuerier) {
                    require.Equal(t, []labelNameRequest{
                        {name: "test"},
                        {name: "test2", matchers: []*labels.Matcher{matcher}},
                    }, m.labelNamesRequested)
                }
                })
            })
        })
    }
}

// Check slice but ignore difference between nil and empty.
func requireEqualSlice[T any](t require.TestingT, a, b []T, msgAndArgs ...interface{}) {
    if len(a) == 0 {
        require.Empty(t, b, msgAndArgs...)
    } else {
        require.Equal(t, a, b, msgAndArgs...)
    }
}
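The helper above exists because require.Equal distinguishes a nil slice from an empty one; a tiny illustration with hypothetical values:

    // Sketch: nil and empty compare as equal under requireEqualSlice.
    requireEqualSlice(t, []string{}, nil)              // passes: both are "empty"
    requireEqualSlice(t, []string{"a"}, []string{"a"}) // passes: deep equality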

type errIterator struct {
    err error
}

@@ -14,7 +14,6 @@
package remote

import (
    "bufio"
    "bytes"
    "context"
    "fmt"

@@ -235,12 +234,12 @@ type RecoverableError struct {

// Store sends a batch of samples to the HTTP endpoint; the request is the proto-marshalled
// and encoded bytes from codec.go.
func (c *Client) Store(ctx context.Context, req []byte, attempt int) error {
func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteResponseStats, error) {
    httpReq, err := http.NewRequest(http.MethodPost, c.urlString, bytes.NewReader(req))
    if err != nil {
        // Errors from NewRequest are from unparsable URLs, so are not
        // recoverable.
        return err
        return WriteResponseStats{}, err
    }

    httpReq.Header.Add("Content-Encoding", string(c.writeCompression))

@@ -267,28 +266,34 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) error {
    if err != nil {
        // Errors from Client.Do are from (for example) network errors, so are
        // recoverable.
        return RecoverableError{err, defaultBackoff}
        return WriteResponseStats{}, RecoverableError{err, defaultBackoff}
    }
    defer func() {
        io.Copy(io.Discard, httpResp.Body)
        httpResp.Body.Close()
    }()

    // TODO(bwplotka): Pass logger and emit debug on error?
    // A parsing error means there were some response header values we couldn't
    // parse; we can continue handling.
    rs, _ := ParseWriteResponseStats(httpResp)

    //nolint:usestdlibvars
    if httpResp.StatusCode/100 != 2 {
        scanner := bufio.NewScanner(io.LimitReader(httpResp.Body, maxErrMsgLen))
        line := ""
        if scanner.Scan() {
            line = scanner.Text()
        }
        err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, line)
    if httpResp.StatusCode/100 == 2 {
        return rs, nil
    }

    // Handle errors, e.g. read the potential error in the body.
    // TODO(bwplotka): Pass logger and emit debug on error?
    body, _ := io.ReadAll(io.LimitReader(httpResp.Body, maxErrMsgLen))
    err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, body)

    //nolint:usestdlibvars
    if httpResp.StatusCode/100 == 5 ||
        (c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests) {
        return RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))}
        return rs, RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))}
    }
    return err
    return rs, err
}
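For orientation, a minimal sketch of how a caller might consume the new Store return value (names here are illustrative; a configured *Client and a marshalled, compressed request are assumed):

    // Sketch: one attempt against a remote write endpoint, inspecting stats.
    func storeOnce(ctx context.Context, c *Client, req []byte) (WriteResponseStats, error) {
        stats, err := c.Store(ctx, req, 0) // attempt 0; retries are the caller's job
        if err != nil {
            var re RecoverableError
            if errors.As(err, &re) {
                // Recoverable (5xx or rate-limited): retry with backoff,
                // as the queue manager does.
            }
            return stats, err
        }
        if stats.Confirmed && stats.NoDataWritten() {
            // A 2.0 receiver confirmed the stats headers but accepted no data.
        }
        return stats, nil
    }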

// retryAfterDuration returns the duration for the Retry-After header. In case of any errors, it

@@ -73,7 +73,7 @@ func TestStoreHTTPErrorHandling(t *testing.T) {
    c, err := NewWriteClient(hash, conf)
    require.NoError(t, err)

    err = c.Store(context.Background(), []byte{}, 0)
    _, err = c.Store(context.Background(), []byte{}, 0)
    if test.err != nil {
        require.EqualError(t, err, test.err.Error())
    } else {

@@ -133,7 +133,7 @@ func TestClientRetryAfter(t *testing.T) {
    c := getClient(getClientConfig(serverURL, tc.retryOnRateLimit))

    var recErr RecoverableError
    err = c.Store(context.Background(), []byte{}, 0)
    _, err = c.Store(context.Background(), []byte{}, 0)
    require.Equal(t, tc.expectedRecoverable, errors.As(err, &recErr), "Mismatch in expected recoverable error status.")
    if tc.expectedRecoverable {
        require.Equal(t, tc.expectedRetryAfter, recErr.retryAfter)

@@ -169,7 +169,7 @@ func TestRetryAfterDuration(t *testing.T) {
    }
}

func TestClientHeaders(t *testing.T) {
func TestClientCustomHeaders(t *testing.T) {
    headersToSend := map[string]string{"Foo": "Bar", "Baz": "qux"}

    var called bool

@@ -203,7 +203,7 @@ func TestClientHeaders(t *testing.T) {
    c, err := NewWriteClient("c", conf)
    require.NoError(t, err)

    err = c.Store(context.Background(), []byte{}, 0)
    _, err = c.Store(context.Background(), []byte{}, 0)
    require.NoError(t, err)

    require.True(t, called, "The remote server wasn't called")

@@ -65,14 +65,14 @@ type bucketBoundsData struct {
    bound float64
}

// byBucketBoundsData enables the usage of sort.Sort() with a slice of bucket bounds
// byBucketBoundsData enables the usage of sort.Sort() with a slice of bucket bounds.
type byBucketBoundsData []bucketBoundsData

func (m byBucketBoundsData) Len() int           { return len(m) }
func (m byBucketBoundsData) Less(i, j int) bool { return m[i].bound < m[j].bound }
func (m byBucketBoundsData) Swap(i, j int)      { m[i], m[j] = m[j], m[i] }

// ByLabelName enables the usage of sort.Sort() with a slice of labels
// ByLabelName enables the usage of sort.Sort() with a slice of labels.
type ByLabelName []prompb.Label

func (a ByLabelName) Len() int { return len(a) }

@@ -115,14 +115,23 @@ var seps = []byte{'\xff'}
// createAttributes creates a slice of Prometheus Labels with OTLP attributes and pairs of string values.
// Unpaired string values are ignored. String pairs overwrite OTLP labels if collisions happen and
// if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized.
func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externalLabels map[string]string,
// If settings.PromoteResourceAttributes is not empty, it's a set of resource attributes that should be promoted to labels.
func createAttributes(resource pcommon.Resource, attributes pcommon.Map, settings Settings,
    ignoreAttrs []string, logOnOverwrite bool, extras ...string) []prompb.Label {
    resourceAttrs := resource.Attributes()
    serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName)
    instance, haveInstanceID := resourceAttrs.Get(conventions.AttributeServiceInstanceID)

    promotedAttrs := make([]prompb.Label, 0, len(settings.PromoteResourceAttributes))
    for _, name := range settings.PromoteResourceAttributes {
        if value, exists := resourceAttrs.Get(name); exists {
            promotedAttrs = append(promotedAttrs, prompb.Label{Name: name, Value: value.AsString()})
        }
    }
    sort.Stable(ByLabelName(promotedAttrs))

    // Calculate the maximum possible number of labels we could return so we can preallocate l
    maxLabelCount := attributes.Len() + len(externalLabels) + len(extras)/2
    maxLabelCount := attributes.Len() + len(settings.ExternalLabels) + len(promotedAttrs) + len(extras)/2

    if haveServiceName {
        maxLabelCount++

@@ -132,9 +141,6 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
        maxLabelCount++
    }

    // map ensures no duplicate label name
    l := make(map[string]string, maxLabelCount)

    // Ensure attributes are sorted by key for consistent merging of keys which
    // collide when sanitized.
    labels := make([]prompb.Label, 0, maxLabelCount)

@@ -148,6 +154,8 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
    })
    sort.Stable(ByLabelName(labels))

    // map ensures no duplicate label names.
    l := make(map[string]string, maxLabelCount)
    for _, label := range labels {
        var finalKey = prometheustranslator.NormalizeLabel(label.Name)
        if existingValue, alreadyExists := l[finalKey]; alreadyExists {

@@ -157,6 +165,13 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
        }
    }

    for _, lbl := range promotedAttrs {
        normalized := prometheustranslator.NormalizeLabel(lbl.Name)
        if _, exists := l[normalized]; !exists {
            l[normalized] = lbl.Value
        }
    }

    // Map service.name + service.namespace to job
    if haveServiceName {
        val := serviceName.AsString()

@@ -169,7 +184,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
    if haveInstanceID {
        l[model.InstanceLabel] = instance.AsString()
    }
    for key, value := range externalLabels {
    for key, value := range settings.ExternalLabels {
        // External labels have already been sanitized
        if _, alreadyExists := l[key]; alreadyExists {
            // Skip external labels if they are overridden by metric attributes

@@ -232,7 +247,7 @@ func (c *PrometheusConverter) addHistogramDataPoints(dataPoints pmetric.Histogra
    for x := 0; x < dataPoints.Len(); x++ {
        pt := dataPoints.At(x)
        timestamp := convertTimeStamp(pt.Timestamp())
        baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false)
        baseLabels := createAttributes(resource, pt.Attributes(), settings, nil, false)

        // If the sum is unset, it indicates the _sum metric point should be
        // omitted

@@ -408,7 +423,7 @@ func (c *PrometheusConverter) addSummaryDataPoints(dataPoints pmetric.SummaryDat
    for x := 0; x < dataPoints.Len(); x++ {
        pt := dataPoints.At(x)
        timestamp := convertTimeStamp(pt.Timestamp())
        baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false)
        baseLabels := createAttributes(resource, pt.Attributes(), settings, nil, false)

        // treat sum as a sample in an individual TimeSeries
        sum := &prompb.Sample{

@@ -554,7 +569,8 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timesta
        name = settings.Namespace + "_" + name
    }

    labels := createAttributes(resource, attributes, settings.ExternalLabels, identifyingAttrs, false, model.MetricNameLabel, name)
    settings.PromoteResourceAttributes = nil
    labels := createAttributes(resource, attributes, settings, identifyingAttrs, false, model.MetricNameLabel, name)
    haveIdentifier := false
    for _, l := range labels {
        if l.Name == model.JobLabel || l.Name == model.InstanceLabel {

@@ -0,0 +1,161 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheusremotewrite

import (
    "testing"

    "github.com/stretchr/testify/assert"
    "go.opentelemetry.io/collector/pdata/pcommon"

    "github.com/prometheus/prometheus/prompb"
)

func TestCreateAttributes(t *testing.T) {
    resourceAttrs := map[string]string{
        "service.name":        "service name",
        "service.instance.id": "service ID",
        "existent-attr":       "resource value",
        // This one is for testing conflict with metric attribute.
        "metric-attr": "resource value",
        // This one is for testing conflict with auto-generated job attribute.
        "job": "resource value",
        // This one is for testing conflict with auto-generated instance attribute.
        "instance": "resource value",
    }

    resource := pcommon.NewResource()
    for k, v := range resourceAttrs {
        resource.Attributes().PutStr(k, v)
    }
    attrs := pcommon.NewMap()
    attrs.PutStr("__name__", "test_metric")
    attrs.PutStr("metric-attr", "metric value")

    testCases := []struct {
        name                      string
        promoteResourceAttributes []string
        expectedLabels            []prompb.Label
    }{
        {
            name:                      "Successful conversion without resource attribute promotion",
            promoteResourceAttributes: nil,
            expectedLabels: []prompb.Label{
                {
                    Name:  "__name__",
                    Value: "test_metric",
                },
                {
                    Name:  "instance",
                    Value: "service ID",
                },
                {
                    Name:  "job",
                    Value: "service name",
                },
                {
                    Name:  "metric_attr",
                    Value: "metric value",
                },
            },
        },
        {
            name:                      "Successful conversion with resource attribute promotion",
            promoteResourceAttributes: []string{"non-existent-attr", "existent-attr"},
            expectedLabels: []prompb.Label{
                {
                    Name:  "__name__",
                    Value: "test_metric",
                },
                {
                    Name:  "instance",
                    Value: "service ID",
                },
                {
                    Name:  "job",
                    Value: "service name",
                },
                {
                    Name:  "metric_attr",
                    Value: "metric value",
                },
                {
                    Name:  "existent_attr",
                    Value: "resource value",
                },
            },
        },
        {
            name:                      "Successful conversion with resource attribute promotion, conflicting resource attributes are ignored",
            promoteResourceAttributes: []string{"non-existent-attr", "existent-attr", "metric-attr", "job", "instance"},
            expectedLabels: []prompb.Label{
                {
                    Name:  "__name__",
                    Value: "test_metric",
                },
                {
                    Name:  "instance",
                    Value: "service ID",
                },
                {
                    Name:  "job",
                    Value: "service name",
                },
                {
                    Name:  "existent_attr",
                    Value: "resource value",
                },
                {
                    Name:  "metric_attr",
                    Value: "metric value",
                },
            },
        },
        {
            name:                      "Successful conversion with resource attribute promotion, attributes are only promoted once",
            promoteResourceAttributes: []string{"existent-attr", "existent-attr"},
            expectedLabels: []prompb.Label{
                {
                    Name:  "__name__",
                    Value: "test_metric",
                },
                {
                    Name:  "instance",
                    Value: "service ID",
                },
                {
                    Name:  "job",
                    Value: "service name",
                },
                {
                    Name:  "existent_attr",
                    Value: "resource value",
                },
                {
                    Name:  "metric_attr",
                    Value: "metric value",
                },
            },
        },
    }
    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            settings := Settings{
                PromoteResourceAttributes: tc.promoteResourceAttributes,
            }
            lbls := createAttributes(resource, attrs, settings, nil, false)

            assert.ElementsMatch(t, lbls, tc.expectedLabels)
        })
    }
}

@@ -45,7 +45,7 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetr
    lbls := createAttributes(
        resource,
        pt.Attributes(),
        settings.ExternalLabels,
        settings,
        nil,
        true,
        model.MetricNameLabel,

@@ -30,12 +30,13 @@ import (
)

type Settings struct {
    Namespace           string
    ExternalLabels      map[string]string
    DisableTargetInfo   bool
    ExportCreatedMetric bool
    AddMetricSuffixes   bool
    SendMetadata        bool
    Namespace                 string
    ExternalLabels            map[string]string
    DisableTargetInfo         bool
    ExportCreatedMetric       bool
    AddMetricSuffixes         bool
    SendMetadata              bool
    PromoteResourceAttributes []string
}
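A short sketch of the new knob in use (values are illustrative; resource and dataPointAttrs stand in for real pcommon values): only resource attributes that actually exist are promoted, and a promoted value never overwrites a label that is already set:

    // Sketch: promote selected OTLP resource attributes to Prometheus labels.
    settings := Settings{
        ExternalLabels:            map[string]string{"cluster": "eu-1"},
        PromoteResourceAttributes: []string{"service.version", "deployment.environment"},
    }
    // Attributes missing from the resource are skipped; promoted values lose
    // conflicts against metric attributes and the generated job/instance labels.
    lbls := createAttributes(resource, dataPointAttrs, settings, nil, false)
    _ = lbls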

// PrometheusConverter converts from OTel write format to Prometheus remote write format.

@@ -34,7 +34,7 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(dataPoints pmetric.Number
    labels := createAttributes(
        resource,
        pt.Attributes(),
        settings.ExternalLabels,
        settings,
        nil,
        true,
        model.MetricNameLabel,

@@ -64,7 +64,7 @@ func (c *PrometheusConverter) addSumNumberDataPoints(dataPoints pmetric.NumberDa
    lbls := createAttributes(
        resource,
        pt.Attributes(),
        settings.ExternalLabels,
        settings,
        nil,
        true,
        model.MetricNameLabel,

@@ -391,7 +391,7 @@ func (m *queueManagerMetrics) unregister() {
// external timeseries database.
type WriteClient interface {
    // Store stores the given samples in the remote storage.
    Store(ctx context.Context, req []byte, retryAttempt int) error
    Store(ctx context.Context, req []byte, retryAttempt int) (WriteResponseStats, error)
    // Name uniquely identifies the remote storage.
    Name() string
    // Endpoint is the remote read or write endpoint for the storage client.

@@ -597,14 +597,15 @@ func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []p
    }

    begin := time.Now()
    err := t.storeClient.Store(ctx, req, try)
    // Ignoring WriteResponseStats, because there is nothing for metadata, since it's
    // embedded in v2 calls now, and we do v1 here.
    _, err := t.storeClient.Store(ctx, req, try)
    t.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds())

    if err != nil {
        span.RecordError(err)
        return err
    }

    return nil
}
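Any WriteClient implementation must now return stats as well; the smallest conforming stub looks something like this (a sketch mirroring the test clients further below):

    // Sketch: a WriteClient that drops everything and reports no stats.
    type discardClient struct{}

    func (discardClient) Store(_ context.Context, _ []byte, _ int) (WriteResponseStats, error) {
        // Confirmed stays false, just like a response from a 1.0 receiver.
        return WriteResponseStats{}, nil
    }
    func (discardClient) Name() string     { return "discard" }
    func (discardClient) Endpoint() string { return "http://example.invalid/write" }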
@@ -1661,8 +1662,8 @@ func populateTimeSeries(batch []timeSeries, pendingData []prompb.TimeSeries, sen

func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) error {
    begin := time.Now()
    err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, 0, pBuf, buf, enc)
    s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, 0, time.Since(begin))
    rs, err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, 0, pBuf, buf, enc)
    s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, 0, rs, time.Since(begin))
    return err
}

@@ -1670,17 +1671,29 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s
// See https://github.com/prometheus/prometheus/issues/14409
func (s *shards) sendV2Samples(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) error {
    begin := time.Now()
    err := s.sendV2SamplesWithBackoff(ctx, samples, labels, sampleCount, exemplarCount, histogramCount, metadataCount, pBuf, buf, enc)
    s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, metadataCount, time.Since(begin))
    rs, err := s.sendV2SamplesWithBackoff(ctx, samples, labels, sampleCount, exemplarCount, histogramCount, metadataCount, pBuf, buf, enc)
    s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, metadataCount, rs, time.Since(begin))
    return err
}

func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exemplarCount, histogramCount, metadataCount int, duration time.Duration) {
func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exemplarCount, histogramCount, metadataCount int, rs WriteResponseStats, duration time.Duration) {
    // Partial errors may happen -- account for that.
    sampleDiff := sampleCount - rs.Samples
    if sampleDiff > 0 {
        s.qm.metrics.failedSamplesTotal.Add(float64(sampleDiff))
    }
    histogramDiff := histogramCount - rs.Histograms
    if histogramDiff > 0 {
        s.qm.metrics.failedHistogramsTotal.Add(float64(histogramDiff))
    }
    exemplarDiff := exemplarCount - rs.Exemplars
    if exemplarDiff > 0 {
        s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarDiff))
    }
    if err != nil {
        level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "histogramCount", histogramCount, "err", err)
        s.qm.metrics.failedSamplesTotal.Add(float64(sampleCount))
        s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarCount))
        s.qm.metrics.failedHistogramsTotal.Add(float64(histogramCount))
        level.Error(s.qm.logger).Log("msg", "non-recoverable error", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff, "err", err)
    } else if sampleDiff+exemplarDiff+histogramDiff > 0 {
        level.Error(s.qm.logger).Log("msg", "we got 2xx status code from the Receiver yet statistics indicate some data was not written; investigation needed", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff)
    }

    // These counters are used to calculate the dynamic sharding, and as such

@@ -1688,6 +1701,7 @@ func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exempl
    s.qm.dataOut.incr(int64(sampleCount + exemplarCount + histogramCount + metadataCount))
    s.qm.dataOutDuration.incr(int64(duration))
    s.qm.lastSendTimestamp.Store(time.Now().Unix())

    // Pending samples/exemplars/histograms also should be subtracted, as an error means
    // they will not be retried.
    s.qm.metrics.pendingSamples.Sub(float64(sampleCount))

@@ -1699,19 +1713,29 @@ func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exempl
}

// sendSamples to the remote storage with backoff for recoverable errors.
func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) error {
func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) (WriteResponseStats, error) {
    // Build the WriteRequest with no metadata.
    req, highest, lowest, err := buildWriteRequest(s.qm.logger, samples, nil, pBuf, buf, nil, enc)
    s.qm.buildRequestLimitTimestamp.Store(lowest)
    if err != nil {
        // Failing to build the write request is non-recoverable, since it will
        // only error if marshaling the proto to bytes fails.
        return err
        return WriteResponseStats{}, err
    }

    reqSize := len(req)
    *buf = req

    // Since we retry writes via attemptStore and sendWriteRequestWithBackoff we need
    // to track the total amount of accepted data across the various attempts.
    accumulatedStats := WriteResponseStats{}
    var accumulatedStatsMu sync.Mutex
    addStats := func(rs WriteResponseStats) {
        accumulatedStatsMu.Lock()
        accumulatedStats = accumulatedStats.Add(rs)
        accumulatedStatsMu.Unlock()
    }

    // An anonymous function allows us to defer the completion of our per-try spans
    // without causing a memory leak, and it has the nice effect of not propagating any
    // parameters for sendSamplesWithBackoff/3.

@@ -1759,15 +1783,19 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
        s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount))
        s.qm.metrics.histogramsTotal.Add(float64(histogramCount))
        s.qm.metrics.metadataTotal.Add(float64(metadataCount))
        err := s.qm.client().Store(ctx, *buf, try)
        // Technically for v1 we will likely have empty response stats, but for
        // newer Receivers we might not, so use them on a best-effort basis.
        rs, err := s.qm.client().Store(ctx, *buf, try)
        s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds())
        // TODO(bwplotka): Revisit this once we have Receivers doing retriable partial errors;
        // so far we don't have those, so it's OK to potentially skew statistics.
        addStats(rs)

        if err != nil {
            span.RecordError(err)
            return err
        if err == nil {
            return nil
        }

        return nil
        span.RecordError(err)
        return err
    }

    onRetry := func() {

@@ -1780,29 +1808,48 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
    if errors.Is(err, context.Canceled) {
        // When there is resharding, we cancel the context for this queue, which means the data is not sent.
        // So we exit early to not update the metrics.
        return err
        return accumulatedStats, err
    }

    s.qm.metrics.sentBytesTotal.Add(float64(reqSize))
    s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000))

    return err
    if err == nil && !accumulatedStats.Confirmed {
        // No 2.0 response headers, and we sent a v1 message, so it's likely a 1.0 Receiver.
        // Assume success, don't rely on headers.
        return WriteResponseStats{
            Samples:    sampleCount,
            Histograms: histogramCount,
            Exemplars:  exemplarCount,
        }, nil
    }
    return accumulatedStats, err
}

// sendV2Samples to the remote storage with backoff for recoverable errors.
func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) error {
func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) (WriteResponseStats, error) {
    // Build the WriteRequest with no metadata.
    req, highest, lowest, err := buildV2WriteRequest(s.qm.logger, samples, labels, pBuf, buf, nil, enc)
    s.qm.buildRequestLimitTimestamp.Store(lowest)
    if err != nil {
        // Failing to build the write request is non-recoverable, since it will
        // only error if marshaling the proto to bytes fails.
        return err
        return WriteResponseStats{}, err
    }

    reqSize := len(req)
    *buf = req

    // Since we retry writes via attemptStore and sendWriteRequestWithBackoff we need
    // to track the total amount of accepted data across the various attempts.
    accumulatedStats := WriteResponseStats{}
    var accumulatedStatsMu sync.Mutex
    addStats := func(rs WriteResponseStats) {
        accumulatedStatsMu.Lock()
        accumulatedStats = accumulatedStats.Add(rs)
        accumulatedStatsMu.Unlock()
    }

    // An anonymous function allows us to defer the completion of our per-try spans
    // without causing a memory leak, and it has the nice effect of not propagating any
    // parameters for sendSamplesWithBackoff/3.

@@ -1850,15 +1897,28 @@ func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2
        s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount))
        s.qm.metrics.histogramsTotal.Add(float64(histogramCount))
        s.qm.metrics.metadataTotal.Add(float64(metadataCount))
        err := s.qm.client().Store(ctx, *buf, try)
        rs, err := s.qm.client().Store(ctx, *buf, try)
        s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds())
        // TODO(bwplotka): Revisit this once we have Receivers doing retriable partial errors;
        // so far we don't have those, so it's OK to potentially skew statistics.
        addStats(rs)

        if err != nil {
            span.RecordError(err)
            return err
        if err == nil {
            // Check the case mentioned in PRW 2.0
            // https://prometheus.io/docs/specs/remote_write_spec_2_0/#required-written-response-headers.
            if sampleCount+histogramCount+exemplarCount > 0 && rs.NoDataWritten() {
                err = fmt.Errorf("sent v2 request with %v samples, %v histograms and %v exemplars; got 2xx, but PRW 2.0 response header statistics indicate %v samples, %v histograms and %v exemplars were accepted;"+
                    " assuming failure e.g. the target only supports PRW 1.0 prometheus.WriteRequest, but does not check the Content-Type header correctly",
                    sampleCount, histogramCount, exemplarCount,
                    rs.Samples, rs.Histograms, rs.Exemplars,
                )
                span.RecordError(err)
                return err
            }
            return nil
        }

        return nil
        span.RecordError(err)
        return err
    }

    onRetry := func() {

@@ -1871,13 +1931,12 @@ func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2
    if errors.Is(err, context.Canceled) {
        // When there is resharding, we cancel the context for this queue, which means the data is not sent.
        // So we exit early to not update the metrics.
        return err
        return accumulatedStats, err
    }

    s.qm.metrics.sentBytesTotal.Add(float64(reqSize))
    s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000))

    return err
    return accumulatedStats, err
}
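The per-attempt stats above are folded together because a retried request can be partially accepted more than once; a toy illustration of the Add semantics (defined in the new stats.go below):

    // Sketch: accumulating WriteResponseStats across two retry attempts.
    total := WriteResponseStats{}
    total = total.Add(WriteResponseStats{Samples: 70, Confirmed: true}) // attempt 1: partial write
    total = total.Add(WriteResponseStats{Samples: 30, Confirmed: true}) // attempt 2: the remainder
    // total.Samples == 100; Confirmed mirrors the most recent response.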

func populateV2TimeSeries(symbolTable *writev2.SymbolsTable, batch []timeSeries, pendingData []writev2.TimeSeries, sendExemplars, sendNativeHistograms bool) (int, int, int, int) {

@@ -118,10 +118,10 @@ func TestBasicContentNegotiation(t *testing.T) {
            expectFail: true,
        },
        {
            name:           "v2 talks to v1 that tries to unmarshal v2 payload with v1 proto",
            name:           "v2 talks to (broken) v1 that tries to unmarshal v2 payload with v1 proto",
            senderProtoMsg: config.RemoteWriteProtoMsgV2, receiverProtoMsg: config.RemoteWriteProtoMsgV1,
            injectErrs:     []error{nil},
            expectFail:     true, // invalid request, no timeseries
            expectFail:     true, // We detect this thanks to https://github.com/prometheus/prometheus/issues/14359
        },
        // Opposite, v1 talking to v2 only server.
        {

@@ -130,12 +130,6 @@ func TestBasicContentNegotiation(t *testing.T) {
            injectErrs: []error{errors.New("pretend unrecoverable err")},
            expectFail: true,
        },
        {
            name:           "v1 talks to (broken) v2 that tries to unmarshal v1 payload with v2 proto",
            senderProtoMsg: config.RemoteWriteProtoMsgV1, receiverProtoMsg: config.RemoteWriteProtoMsgV2,
            injectErrs:     []error{nil},
            expectFail:     true, // invalid request, no timeseries
        },
    } {
        t.Run(tc.name, func(t *testing.T) {
            dir := t.TempDir()

@@ -182,7 +176,6 @@ func TestBasicContentNegotiation(t *testing.T) {
            if !tc.expectFail {
                // No error expected, so wait for data.
                c.waitForExpectedData(t, 5*time.Second)
                require.Equal(t, 1, c.writesReceived)
                require.Equal(t, 0.0, client_testutil.ToFloat64(qm.metrics.failedSamplesTotal))
            } else {
                // Wait for failure to be recorded in metrics.

@@ -190,11 +183,10 @@ func TestBasicContentNegotiation(t *testing.T) {
                defer cancel()
                require.NoError(t, runutil.Retry(500*time.Millisecond, ctx.Done(), func() error {
                    if client_testutil.ToFloat64(qm.metrics.failedSamplesTotal) != 1.0 {
                        return errors.New("expected one sample failed in qm metrics")
                        return fmt.Errorf("expected one sample failed in qm metrics; got %v", client_testutil.ToFloat64(qm.metrics.failedSamplesTotal))
                    }
                    return nil
                }))
                require.Equal(t, 0, c.writesReceived)
            }

            // samplesTotal means attempts.

@@ -764,10 +756,10 @@ func TestDisableReshardOnRetry(t *testing.T) {
        metrics = newQueueManagerMetrics(nil, "", "")

        client = &MockWriteClient{
            StoreFunc: func(ctx context.Context, b []byte, i int) error {
            StoreFunc: func(ctx context.Context, b []byte, i int) (WriteResponseStats, error) {
                onStoreCalled()

                return RecoverableError{
                return WriteResponseStats{}, RecoverableError{
                    error:      fmt.Errorf("fake error"),
                    retryAfter: model.Duration(retryAfter),
                }

@@ -1113,14 +1105,14 @@ func (c *TestWriteClient) SetReturnError(err error) {
    c.returnError = err
}

func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) (WriteResponseStats, error) {
    c.mtx.Lock()
    defer c.mtx.Unlock()
    if c.storeWait > 0 {
        time.Sleep(c.storeWait)
    }
    if c.returnError != nil {
        return c.returnError
        return WriteResponseStats{}, c.returnError
    }
    // nil buffers are ok for snappy, ignore cast error.
    if c.buf != nil {

@@ -1130,14 +1122,14 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
    reqBuf, err := snappy.Decode(c.buf, req)
    c.buf = reqBuf
    if err != nil {
        return err
        return WriteResponseStats{}, err
    }

    // Check if we've been told to inject err for this call.
    if len(c.injectedErrs) > 0 {
        c.currErr++
        if err = c.injectedErrs[c.currErr]; err != nil {
            return err
            return WriteResponseStats{}, err
        }
    }

@@ -1156,13 +1148,10 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
        }
    }
    if err != nil {
        return err
    }

    if len(reqProto.Timeseries) == 0 && len(reqProto.Metadata) == 0 {
        return errors.New("invalid request, no timeseries")
        return WriteResponseStats{}, err
    }

    rs := WriteResponseStats{}
    b := labels.NewScratchBuilder(0)
    for _, ts := range reqProto.Timeseries {
        labels := ts.ToLabels(&b, nil)

@@ -1170,10 +1159,12 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
        if len(ts.Samples) > 0 {
            c.receivedSamples[tsID] = append(c.receivedSamples[tsID], ts.Samples...)
        }
        rs.Samples += len(ts.Samples)

        if len(ts.Exemplars) > 0 {
            c.receivedExemplars[tsID] = append(c.receivedExemplars[tsID], ts.Exemplars...)
        }
        rs.Exemplars += len(ts.Exemplars)

        for _, h := range ts.Histograms {
            if h.IsFloatHistogram() {

@@ -1182,13 +1173,14 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
                c.receivedHistograms[tsID] = append(c.receivedHistograms[tsID], h)
            }
        }
        rs.Histograms += len(ts.Histograms)
    }
    for _, m := range reqProto.Metadata {
        c.receivedMetadata[m.MetricFamilyName] = append(c.receivedMetadata[m.MetricFamilyName], m)
    }

    c.writesReceived++
    return nil
    return rs, nil
}

func (c *TestWriteClient) Name() string {

@@ -1256,10 +1248,10 @@ func NewTestBlockedWriteClient() *TestBlockingWriteClient {
    return &TestBlockingWriteClient{}
}

func (c *TestBlockingWriteClient) Store(ctx context.Context, _ []byte, _ int) error {
func (c *TestBlockingWriteClient) Store(ctx context.Context, _ []byte, _ int) (WriteResponseStats, error) {
    c.numCalls.Inc()
    <-ctx.Done()
    return nil
    return WriteResponseStats{}, nil
}

func (c *TestBlockingWriteClient) NumCalls() uint64 {

@@ -1278,19 +1270,19 @@ func (c *TestBlockingWriteClient) Endpoint() string {
type NopWriteClient struct{}

func NewNopWriteClient() *NopWriteClient { return &NopWriteClient{} }
func (c *NopWriteClient) Store(context.Context, []byte, int) error {
    return nil
func (c *NopWriteClient) Store(context.Context, []byte, int) (WriteResponseStats, error) {
    return WriteResponseStats{}, nil
}
func (c *NopWriteClient) Name() string     { return "nopwriteclient" }
func (c *NopWriteClient) Endpoint() string { return "http://test-remote.com/1234" }

type MockWriteClient struct {
    StoreFunc    func(context.Context, []byte, int) error
    StoreFunc    func(context.Context, []byte, int) (WriteResponseStats, error)
    NameFunc     func() string
    EndpointFunc func() string
}

func (c *MockWriteClient) Store(ctx context.Context, bb []byte, n int) error {
func (c *MockWriteClient) Store(ctx context.Context, bb []byte, n int) (WriteResponseStats, error) {
    return c.StoreFunc(ctx, bb, n)
}
func (c *MockWriteClient) Name() string { return c.NameFunc() }

107
storage/remote/stats.go
Normal file

@@ -0,0 +1,107 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package remote

import (
    "errors"
    "net/http"
    "strconv"
)

const (
    rw20WrittenSamplesHeader    = "X-Prometheus-Remote-Write-Samples-Written"
    rw20WrittenHistogramsHeader = "X-Prometheus-Remote-Write-Histograms-Written"
    rw20WrittenExemplarsHeader  = "X-Prometheus-Remote-Write-Exemplars-Written"
)

// WriteResponseStats represents the response write statistics specified in https://github.com/prometheus/docs/pull/2486
type WriteResponseStats struct {
    // Samples represents X-Prometheus-Remote-Write-Samples-Written.
    Samples int
    // Histograms represents X-Prometheus-Remote-Write-Histograms-Written.
    Histograms int
    // Exemplars represents X-Prometheus-Remote-Write-Exemplars-Written.
    Exemplars int

    // Confirmed means we can trust those statistics from the point of view
    // of the PRW 2.0 spec. When parsed from headers, it means we got at least one
    // response header from the Receiver to confirm those numbers, meaning it must
    // be at least a 2.0 Receiver. See ParseWriteResponseStats for details.
    Confirmed bool
}

// NoDataWritten returns true if statistics indicate no data was written.
func (s WriteResponseStats) NoDataWritten() bool {
    return (s.Samples + s.Histograms + s.Exemplars) == 0
}

// AllSamples returns both float and histogram sample numbers.
func (s WriteResponseStats) AllSamples() int {
    return s.Samples + s.Histograms
}

// Add returns the sum of this WriteResponseStats plus the given WriteResponseStats.
func (s WriteResponseStats) Add(rs WriteResponseStats) WriteResponseStats {
    s.Confirmed = rs.Confirmed
    s.Samples += rs.Samples
    s.Histograms += rs.Histograms
    s.Exemplars += rs.Exemplars
    return s
}

// SetHeaders sets response headers in a given response writer.
// Make sure to use it before http.ResponseWriter.WriteHeader and .Write.
func (s WriteResponseStats) SetHeaders(w http.ResponseWriter) {
    h := w.Header()
    h.Set(rw20WrittenSamplesHeader, strconv.Itoa(s.Samples))
    h.Set(rw20WrittenHistogramsHeader, strconv.Itoa(s.Histograms))
    h.Set(rw20WrittenExemplarsHeader, strconv.Itoa(s.Exemplars))
}

// ParseWriteResponseStats returns WriteResponseStats parsed from the response headers.
//
// As per the 2.0 spec, a missing header means 0. However, abrupt HTTP errors, 1.0 Receivers
// or buggy 2.0 Receivers might result in no response headers specified and that
// might NOT necessarily mean nothing was written. To represent that we set
// s.Confirmed = true only when we see at least one response header.
//
// An error is returned when any of the headers fails to parse as an int64.
func ParseWriteResponseStats(r *http.Response) (s WriteResponseStats, err error) {
    var (
        errs []error
        h    = r.Header
    )
    if v := h.Get(rw20WrittenSamplesHeader); v != "" { // Empty means zero.
        s.Confirmed = true
        if s.Samples, err = strconv.Atoi(v); err != nil {
            s.Samples = 0
            errs = append(errs, err)
        }
    }
    if v := h.Get(rw20WrittenHistogramsHeader); v != "" { // Empty means zero.
        s.Confirmed = true
        if s.Histograms, err = strconv.Atoi(v); err != nil {
            s.Histograms = 0
            errs = append(errs, err)
        }
    }
    if v := h.Get(rw20WrittenExemplarsHeader); v != "" { // Empty means zero.
        s.Confirmed = true
        if s.Exemplars, err = strconv.Atoi(v); err != nil {
            s.Exemplars = 0
            errs = append(errs, err)
        }
    }
    return s, errors.Join(errs...)
}
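A small round-trip sketch of the two helpers (using net/http/httptest; not part of the change itself):

    // Sketch: stats set by a receiver can be recovered by the sender.
    rec := httptest.NewRecorder()
    WriteResponseStats{Samples: 12, Histograms: 3, Exemplars: 1}.SetHeaders(rec)
    rec.WriteHeader(http.StatusNoContent)

    got, err := ParseWriteResponseStats(rec.Result())
    // err == nil; got.Samples == 12, got.Histograms == 3, got.Exemplars == 1,
    // and got.Confirmed == true because at least one header was present.
    _, _ = got, err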
@ -19,7 +19,6 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
|
@ -201,7 +200,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|||
respStats, errHTTPCode, err := h.writeV2(r.Context(), &req)
|
||||
|
||||
// Set required X-Prometheus-Remote-Write-Written-* response headers, in all cases.
|
||||
respStats.SetResponseHeaders(w.Header())
|
||||
respStats.SetHeaders(w)
|
||||
|
||||
if err != nil {
|
||||
if errHTTPCode/5 == 100 { // 5xx
|
||||
|
@ -318,24 +317,6 @@ func (h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Hist
|
|||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
prw20WrittenSamplesHeader = "X-Prometheus-Remote-Write-Written-Samples"
|
||||
rw20WrittenHistogramsHeader = "X-Prometheus-Remote-Write-Written-Histograms"
|
||||
rw20WrittenExemplarsHeader = "X-Prometheus-Remote-Write-Written-Exemplars"
|
||||
)
|
||||
|
||||
type responseStats struct {
|
||||
samples int
|
||||
histograms int
|
||||
exemplars int
|
||||
}
|
||||
|
||||
func (s responseStats) SetResponseHeaders(h http.Header) {
|
||||
h.Set(prw20WrittenSamplesHeader, strconv.Itoa(s.samples))
|
||||
h.Set(rw20WrittenHistogramsHeader, strconv.Itoa(s.histograms))
|
||||
h.Set(rw20WrittenExemplarsHeader, strconv.Itoa(s.exemplars))
|
||||
}
|
||||
|
||||
// writeV2 is similar to write, but it works with v2 proto message,
|
||||
// allows partial 4xx writes and gathers statistics.
|
||||
//
|
||||
|
@ -345,14 +326,14 @@ func (s responseStats) SetResponseHeaders(h http.Header) {
|
|||
//
|
||||
// NOTE(bwplotka): TSDB storage is NOT idempotent, so we don't allow "partial retry-able" errors.
|
||||
// Once we have 5xx type of error, we immediately stop and rollback all appends.
|
||||
func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ responseStats, errHTTPCode int, _ error) {
|
||||
func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ WriteResponseStats, errHTTPCode int, _ error) {
|
||||
app := &timeLimitAppender{
|
||||
Appender: h.appendable.Appender(ctx),
|
||||
maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
|
||||
}
|
||||
|
||||
rs := responseStats{}
|
||||
samplesWithoutMetadata, errHTTPCode, err := h.appendV2(app, req, &rs)
|
||||
s := WriteResponseStats{}
|
||||
samplesWithoutMetadata, errHTTPCode, err := h.appendV2(app, req, &s)
|
||||
if err != nil {
|
||||
if errHTTPCode/5 == 100 {
|
||||
// On 5xx, we always rollback, because we expect
|
||||
|
@ -360,29 +341,29 @@ func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ res
|
|||
if rerr := app.Rollback(); rerr != nil {
|
||||
level.Error(h.logger).Log("msg", "writev2 rollback failed on retry-able error", "err", rerr)
|
||||
}
|
||||
return responseStats{}, errHTTPCode, err
|
||||
return WriteResponseStats{}, errHTTPCode, err
|
||||
}
|
||||
|
||||
// Non-retriable (e.g. bad request error case). Can be partially written.
|
||||
commitErr := app.Commit()
|
||||
if commitErr != nil {
|
||||
// Bad requests does not matter as we have internal error (retryable).
|
||||
return responseStats{}, http.StatusInternalServerError, commitErr
|
||||
return WriteResponseStats{}, http.StatusInternalServerError, commitErr
|
||||
}
|
||||
// Bad request error happened, but rest of data (if any) was written.
|
||||
h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata))
|
||||
return rs, errHTTPCode, err
|
||||
return s, errHTTPCode, err
|
||||
}
|
||||
|
||||
// All good just commit.
|
||||
if err := app.Commit(); err != nil {
|
||||
return responseStats{}, http.StatusInternalServerError, err
|
||||
return WriteResponseStats{}, http.StatusInternalServerError, err
|
||||
}
|
||||
h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata))
|
||||
return rs, 0, nil
|
||||
return s, 0, nil
|
||||
}

func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *responseStats) (samplesWithoutMetadata, errHTTPCode int, err error) {
func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *WriteResponseStats) (samplesWithoutMetadata, errHTTPCode int, err error) {
    var (
        badRequestErrs                                   []error
        outOfOrderExemplarErrs, samplesWithInvalidLabels int
@@ -400,14 +381,14 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
            continue
        }

        allSamplesSoFar := rs.samples + rs.histograms
        allSamplesSoFar := rs.AllSamples()
        var ref storage.SeriesRef

        // Samples.
        for _, s := range ts.Samples {
            ref, err = app.Append(ref, ls, s.GetTimestamp(), s.GetValue())
            if err == nil {
                rs.samples++
                rs.Samples++
                continue
            }
            // Handle append error.
@@ -431,7 +412,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
                ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, hp.ToIntHistogram(), nil)
            }
            if err == nil {
                rs.histograms++
                rs.Histograms++
                continue
            }
            // Handle append error.
@@ -453,18 +434,19 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
            e := ep.ToExemplar(&b, req.Symbols)
            ref, err = app.AppendExemplar(ref, ls, e)
            if err == nil {
                rs.exemplars++
                rs.Exemplars++
                continue
            }
            // Handle append error.
            // TODO(bwplotka): I left the logic as in v1, but we might want to make it consistent with samples and histograms.
            // Since exemplar storage is still experimental, we don't fail the request in any way on ingestion errors.
            if errors.Is(err, storage.ErrOutOfOrderExemplar) {
                outOfOrderExemplarErrs++
                level.Debug(h.logger).Log("msg", "Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
                outOfOrderExemplarErrs++ // Maintain old metrics, but technically not needed, given we fail here.
                level.Error(h.logger).Log("msg", "Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
                badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
                continue
            }
            level.Debug(h.logger).Log("msg", "Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err)
            // TODO(bwplotka): Add strict mode which would trigger rollback of everything if needed.
            // For now we keep the previously released flow (just error, not debug level) of dropping them without rollback and 5xx.
            level.Error(h.logger).Log("msg", "failed to ingest exemplar, emitting error log, but no error for PRW caller", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
        }

        m := ts.ToMetadata(req.Symbols)
@@ -472,7 +454,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
            level.Debug(h.logger).Log("msg", "error while updating metadata from remote write", "err", err)
            // Metadata is attached to each series, so since Prometheus does not reject samples without metadata information,
            // we don't report a remote write error either. We increment a metric instead.
            samplesWithoutMetadata += (rs.samples + rs.histograms) - allSamplesSoFar
            samplesWithoutMetadata += rs.AllSamples() - allSamplesSoFar
        }
    }

@@ -490,21 +472,23 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *

// NewOTLPWriteHandler creates an http.Handler that accepts OTLP write requests and
// writes them to the provided appendable.
func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable) http.Handler {
func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable, configFunc func() config.Config) http.Handler {
    rwHandler := &writeHandler{
        logger:     logger,
        appendable: appendable,
    }

    return &otlpWriteHandler{
        logger:    logger,
        rwHandler: rwHandler,
        logger:     logger,
        rwHandler:  rwHandler,
        configFunc: configFunc,
    }
}

type otlpWriteHandler struct {
    logger    log.Logger
    rwHandler *writeHandler
    logger     log.Logger
    rwHandler  *writeHandler
    configFunc func() config.Config
}

func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
@@ -515,9 +499,12 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
        return
    }

    otlpCfg := h.configFunc().OTLPConfig

    converter := otlptranslator.NewPrometheusConverter()
    if err := converter.FromMetrics(req.Metrics(), otlptranslator.Settings{
        AddMetricSuffixes: true,
        AddMetricSuffixes:         true,
        PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes,
    }); err != nil {
        level.Warn(h.logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", err)
    }
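Taking a configFunc rather than a config snapshot means ServeHTTP re-reads OTLPConfig on every request, so PromoteResourceAttributes picks up configuration reloads without rebuilding the handler. A hedged wiring sketch (the mutex-guarded getter and helper name are assumptions for illustration; only NewOTLPWriteHandler and config.Config come from this commit):

import (
    "net/http"
    "sync"

    "github.com/go-kit/log"

    "github.com/prometheus/prometheus/config"
    "github.com/prometheus/prometheus/storage"
    "github.com/prometheus/prometheus/storage/remote"
)

// newReloadableOTLPHandler is a hypothetical helper: cfg is swapped under mu
// whenever the server reloads its configuration.
func newReloadableOTLPHandler(logger log.Logger, app storage.Appendable, mu *sync.RWMutex, cfg *config.Config) http.Handler {
    return remote.NewOTLPWriteHandler(logger, app, func() config.Config {
        mu.RLock()
        defer mu.RUnlock()
        return *cfg
    })
}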

@@ -398,7 +398,7 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {
        {
            desc:              "Partial write; skipped exemplar; exemplar storage errs are noop",
            input:             writeV2RequestFixture.Timeseries,
            appendExemplarErr: errors.New("some exemplar append error"),
            appendExemplarErr: errors.New("some exemplar internal append error"),

            expectedCode: http.StatusNoContent,
        },
@@ -449,9 +449,9 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {

            if tc.expectedCode == http.StatusInternalServerError {
                // We don't expect writes for partial writes with retry-able code.
                expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Samples"))
                expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Histograms"))
                expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Exemplars"))
                expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenSamplesHeader))
                expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenHistogramsHeader))
                expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenExemplarsHeader))

                require.Empty(t, len(appendable.samples))
                require.Empty(t, len(appendable.histograms))
@@ -462,12 +462,12 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {

            // Double check mandatory 2.0 stats.
            // writeV2RequestFixture has 2 series with 1 sample, 2 histograms, 1 exemplar each.
            expectHeaderValue(t, 2, resp.Header.Get("X-Prometheus-Remote-Write-Written-Samples"))
            expectHeaderValue(t, 4, resp.Header.Get("X-Prometheus-Remote-Write-Written-Histograms"))
            expectHeaderValue(t, 2, resp.Header.Get(rw20WrittenSamplesHeader))
            expectHeaderValue(t, 4, resp.Header.Get(rw20WrittenHistogramsHeader))
            if tc.appendExemplarErr != nil {
                expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Exemplars"))
                expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenExemplarsHeader))
            } else {
                expectHeaderValue(t, 2, resp.Header.Get("X-Prometheus-Remote-Write-Written-Exemplars"))
                expectHeaderValue(t, 2, resp.Header.Get(rw20WrittenExemplarsHeader))
            }

            // Double check what was actually appended.

@@ -379,7 +379,11 @@ func TestOTLPWriteHandler(t *testing.T) {
    req.Header.Set("Content-Type", "application/x-protobuf")

    appendable := &mockAppendable{}
    handler := NewOTLPWriteHandler(nil, appendable)
    handler := NewOTLPWriteHandler(nil, appendable, func() config.Config {
        return config.Config{
            OTLPConfig: config.DefaultOTLPConfig,
        }
    })

    recorder := httptest.NewRecorder()
    handler.ServeHTTP(recorder, req)

@@ -30,7 +30,7 @@ import (
    "go.opentelemetry.io/otel/propagation"
    "go.opentelemetry.io/otel/sdk/resource"
    tracesdk "go.opentelemetry.io/otel/sdk/trace"
    semconv "go.opentelemetry.io/otel/semconv/v1.25.0"
    semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
    "go.opentelemetry.io/otel/trace"
    "go.opentelemetry.io/otel/trace/noop"
    "google.golang.org/grpc/credentials"

@@ -2099,6 +2099,7 @@ type memSeries struct {
    nextAt                           int64 // Timestamp at which to cut the next chunk.
    histogramChunkHasComputedEndTime bool  // True if nextAt has been predicted for the current histograms chunk; false otherwise.
    pendingCommit                    bool  // Whether there are samples waiting to be committed to this series.

    // We keep the last value here (in addition to appending it to the chunk) so we can check for duplicates.
    lastValue float64
@@ -2114,8 +2115,6 @@ type memSeries struct {

    // txs is nil if isolation is disabled.
    txs *txRing

    pendingCommit bool // Whether there are samples waiting to be committed to this series.
}

// memSeriesOOOFields contains the fields required by memSeries
@@ -846,16 +846,17 @@ func (a *headAppender) Commit() (err error) {
        // number of samples rejected due to: out of bounds: with t < minValidTime (OOO support disabled)
        floatOOBRejected int

        inOrderMint    int64 = math.MaxInt64
        inOrderMaxt    int64 = math.MinInt64
        ooomint        int64 = math.MaxInt64
        ooomaxt        int64 = math.MinInt64
        wblSamples     []record.RefSample
        oooMmapMarkers map[chunks.HeadSeriesRef]chunks.ChunkDiskMapperRef
        oooRecords     [][]byte
        oooCapMax      = a.head.opts.OutOfOrderCapMax.Load()
        series         *memSeries
        appendChunkOpts = chunkOpts{
        inOrderMint         int64 = math.MaxInt64
        inOrderMaxt         int64 = math.MinInt64
        oooMinT             int64 = math.MaxInt64
        oooMaxT             int64 = math.MinInt64
        wblSamples          []record.RefSample
        oooMmapMarkers      map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef
        oooMmapMarkersCount int
        oooRecords          [][]byte
        oooCapMax           = a.head.opts.OutOfOrderCapMax.Load()
        series              *memSeries
        appendChunkOpts     = chunkOpts{
            chunkDiskMapper: a.head.chunkDiskMapper,
            chunkRange:      a.head.chunkRange.Load(),
            samplesPerChunk: a.head.opts.SamplesPerChunk,
@@ -872,6 +873,7 @@ func (a *headAppender) Commit() (err error) {
            // WBL is not enabled. So no need to collect.
            wblSamples = nil
            oooMmapMarkers = nil
            oooMmapMarkersCount = 0
            return
        }
        // The m-map happens before adding a new sample. So we collect
@@ -880,12 +882,14 @@ func (a *headAppender) Commit() (err error) {
        // WBL Before this Commit(): [old samples before this commit for chunk 1]
        // WBL After this Commit(): [old samples before this commit for chunk 1][new samples in this commit for chunk 1]mmapmarker1[samples for chunk 2]mmapmarker2[samples for chunk 3]
        if oooMmapMarkers != nil {
            markers := make([]record.RefMmapMarker, 0, len(oooMmapMarkers))
            for ref, mmapRef := range oooMmapMarkers {
                markers = append(markers, record.RefMmapMarker{
                    Ref:     ref,
                    MmapRef: mmapRef,
                })
            markers := make([]record.RefMmapMarker, 0, oooMmapMarkersCount)
            for ref, mmapRefs := range oooMmapMarkers {
                for _, mmapRef := range mmapRefs {
                    markers = append(markers, record.RefMmapMarker{
                        Ref:     ref,
                        MmapRef: mmapRef,
                    })
                }
            }
            r := enc.MmapMarkers(markers, a.head.getBytesBuffer())
            oooRecords = append(oooRecords, r)
@@ -928,32 +932,39 @@ func (a *headAppender) Commit() (err error) {
        case oooSample:
            // Sample is OOO and OOO handling is enabled
            // and the delta is within the OOO tolerance.
            var mmapRef chunks.ChunkDiskMapperRef
            ok, chunkCreated, mmapRef = series.insert(s.T, s.V, a.head.chunkDiskMapper, oooCapMax)
            var mmapRefs []chunks.ChunkDiskMapperRef
            ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, a.head.chunkDiskMapper, oooCapMax)
            if chunkCreated {
                r, ok := oooMmapMarkers[series.ref]
                if !ok || r != 0 {
                if !ok || r != nil {
                    // !ok means there are no markers collected for these samples yet. So we first flush the samples
                    // before setting this m-map marker.

                    // r != 0 means we have already m-mapped a chunk for this series in the same Commit().
                    // r != nil means we have already m-mapped a chunk for this series in the same Commit().
                    // Hence, before we m-map again, we should add the samples and m-map markers
                    // seen till now to the WBL records.
                    collectOOORecords()
                }

                if oooMmapMarkers == nil {
                    oooMmapMarkers = make(map[chunks.HeadSeriesRef]chunks.ChunkDiskMapperRef)
                    oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef)
                }
                if len(mmapRefs) > 0 {
                    oooMmapMarkers[series.ref] = mmapRefs
                    oooMmapMarkersCount += len(mmapRefs)
                } else {
                    // No chunk was written to disk, so we need to set an initial marker for this series.
                    oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0}
                    oooMmapMarkersCount++
                }
                oooMmapMarkers[series.ref] = mmapRef
            }
            if ok {
                wblSamples = append(wblSamples, s)
                if s.T < ooomint {
                    ooomint = s.T
                if s.T < oooMinT {
                    oooMinT = s.T
                }
                if s.T > ooomaxt {
                    ooomaxt = s.T
                if s.T > oooMaxT {
                    oooMaxT = s.T
                }
                floatOOOAccepted++
            } else {
@@ -1053,7 +1064,7 @@ func (a *headAppender) Commit() (err error) {
    a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histogramsAppended))
    a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatOOOAccepted))
    a.head.updateMinMaxTime(inOrderMint, inOrderMaxt)
    a.head.updateMinOOOMaxOOOTime(ooomint, ooomaxt)
    a.head.updateMinOOOMaxOOOTime(oooMinT, oooMaxT)

    collectOOORecords()
    if a.head.wbl != nil {
@@ -1069,14 +1080,14 @@ func (a *headAppender) Commit() (err error) {
}

// insert is like append, except it inserts. Used for OOO samples.
func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64) (inserted, chunkCreated bool, mmapRef chunks.ChunkDiskMapperRef) {
func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) {
    if s.ooo == nil {
        s.ooo = &memSeriesOOOFields{}
    }
    c := s.ooo.oooHeadChunk
    if c == nil || c.chunk.NumSamples() == int(oooCapMax) {
        // Note: If no new samples come in then we rely on compaction to clean up stale in-memory OOO chunks.
        c, mmapRef = s.cutNewOOOHeadChunk(t, chunkDiskMapper)
        c, mmapRefs = s.cutNewOOOHeadChunk(t, chunkDiskMapper)
        chunkCreated = true
    }

@@ -1089,7 +1100,7 @@ func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDisk
            c.maxTime = t
        }
    }
    return ok, chunkCreated, mmapRef
    return ok, chunkCreated, mmapRefs
}

// chunkOpts are chunk-level options that are passed when appending to a memSeries.
@@ -1431,7 +1442,7 @@ func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange

// cutNewOOOHeadChunk cuts a new OOO chunk and m-maps the old chunk.
// The caller must ensure that s.ooo is not nil.
func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) (*oooHeadChunk, chunks.ChunkDiskMapperRef) {
func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) {
    ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper)

    s.ooo.oooHeadChunk = &oooHeadChunk{
@@ -1443,21 +1454,29 @@ func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.Chunk
    return s.ooo.oooHeadChunk, ref
}

func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) chunks.ChunkDiskMapperRef {
func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) []chunks.ChunkDiskMapperRef {
    if s.ooo == nil || s.ooo.oooHeadChunk == nil {
        // There is no head chunk, so nothing to m-map here.
        return 0
        // OOO is not enabled or there is no head chunk, so nothing to m-map here.
        return nil
    }
    chks, err := s.ooo.oooHeadChunk.chunk.ToEncodedChunks(math.MinInt64, math.MaxInt64)
    if err != nil {
        handleChunkWriteError(err)
        return nil
    }
    chunkRefs := make([]chunks.ChunkDiskMapperRef, 0, 1)
    for _, memchunk := range chks {
        chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, memchunk.chunk, true, handleChunkWriteError)
        chunkRefs = append(chunkRefs, chunkRef)
        s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{
            ref:        chunkRef,
            numSamples: uint16(memchunk.chunk.NumSamples()),
            minTime:    memchunk.minTime,
            maxTime:    memchunk.maxTime,
        })
    }
    xor, _ := s.ooo.oooHeadChunk.chunk.ToXOR() // Encode to XorChunk which is more compact and implements all of the needed functionality.
    chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, xor, true, handleChunkWriteError)
    s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{
        ref:        chunkRef,
        numSamples: uint16(xor.NumSamples()),
        minTime:    s.ooo.oooHeadChunk.minTime,
        maxTime:    s.ooo.oooHeadChunk.maxTime,
    })
    s.ooo.oooHeadChunk = nil
    return chunkRef
    return chunkRefs
}

// mmapChunks will m-map all but first chunk on s.headChunks list.
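Because one OOO head chunk can now encode into several chunks (see ToEncodedChunks further down), m-mapping returns a slice of disk references instead of a single one, and an empty or nil slice still means "nothing was m-mapped". Callers that only care about the newest reference reduce the slice as in this sketch, which mirrors the NewOOOCompactionHead change below (helper name hypothetical):

// lastRef picks the most recent disk reference from the refs returned by
// mmapCurrentOOOHeadChunk; 0 keeps the old "nothing written" sentinel.
func lastRef(refs []chunks.ChunkDiskMapperRef) chunks.ChunkDiskMapperRef {
    if len(refs) == 0 {
        return 0
    }
    return refs[len(refs)-1]
}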

@@ -4730,6 +4730,14 @@ func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) {

// TestWBLReplay checks the replay at a low level.
func TestWBLReplay(t *testing.T) {
    for name, scenario := range sampleTypeScenarios {
        t.Run(name, func(t *testing.T) {
            testWBLReplay(t, scenario)
        })
    }
}

func testWBLReplay(t *testing.T, scenario sampleTypeScenario) {
    dir := t.TempDir()
    wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy)
    require.NoError(t, err)
@@ -4745,11 +4753,11 @@ func TestWBLReplay(t *testing.T) {
    require.NoError(t, err)
    require.NoError(t, h.Init(0))

    var expOOOSamples []sample
    var expOOOSamples []chunks.Sample
    l := labels.FromStrings("foo", "bar")
    appendSample := func(mins int64, isOOO bool) {
    appendSample := func(mins int64, val float64, isOOO bool) {
        app := h.Appender(context.Background())
        ts, v := mins*time.Minute.Milliseconds(), float64(mins)
        ts, v := mins*time.Minute.Milliseconds(), val
        _, err := app.Append(0, l, ts, v)
        require.NoError(t, err)
        require.NoError(t, app.Commit())
@@ -4760,15 +4768,15 @@ func TestWBLReplay(t *testing.T) {
    }

    // In-order sample.
    appendSample(60, false)
    appendSample(60, 60, false)

    // Out of order samples.
    appendSample(40, true)
    appendSample(35, true)
    appendSample(50, true)
    appendSample(55, true)
    appendSample(59, true)
    appendSample(31, true)
    appendSample(40, 40, true)
    appendSample(35, 35, true)
    appendSample(50, 50, true)
    appendSample(55, 55, true)
    appendSample(59, 59, true)
    appendSample(31, 31, true)

    // Check that Head's time ranges are set properly.
    require.Equal(t, 60*time.Minute.Milliseconds(), h.MinTime())
@@ -4792,22 +4800,23 @@ func TestWBLReplay(t *testing.T) {
    require.False(t, ok)
    require.NotNil(t, ms)

    xor, err := ms.ooo.oooHeadChunk.chunk.ToXOR()
    chks, err := ms.ooo.oooHeadChunk.chunk.ToEncodedChunks(math.MinInt64, math.MaxInt64)
    require.NoError(t, err)
    require.Len(t, chks, 1)

    it := xor.Iterator(nil)
    actOOOSamples := make([]sample, 0, len(expOOOSamples))
    for it.Next() == chunkenc.ValFloat {
        ts, v := it.At()
        actOOOSamples = append(actOOOSamples, sample{t: ts, f: v})
    }
    it := chks[0].chunk.Iterator(nil)
    actOOOSamples, err := storage.ExpandSamples(it, nil)
    require.NoError(t, err)

    // OOO chunk will be sorted. Hence sort the expected samples.
    sort.Slice(expOOOSamples, func(i, j int) bool {
        return expOOOSamples[i].t < expOOOSamples[j].t
        return expOOOSamples[i].T() < expOOOSamples[j].T()
    })

    require.Equal(t, expOOOSamples, actOOOSamples)
    // Passing in true for the 'ignoreCounterResets' parameter prevents differences in counter reset headers
    // from being factored into the sample comparison.
    // TODO(fionaliao): understand counter reset behaviour; might want to modify this later.
    requireEqualSamples(t, l.String(), expOOOSamples, actOOOSamples, true)

    require.NoError(t, h.Close())
}

107 tsdb/ooo_head.go
@@ -17,9 +17,10 @@ import (
    "fmt"
    "sort"

    "github.com/prometheus/prometheus/tsdb/chunkenc"

    "github.com/oklog/ulid"

    "github.com/prometheus/prometheus/tsdb/chunkenc"
    "github.com/prometheus/prometheus/tsdb/chunks"
    "github.com/prometheus/prometheus/tsdb/tombstones"
)
@@ -74,24 +75,22 @@ func (o *OOOChunk) NumSamples() int {
    return len(o.samples)
}

func (o *OOOChunk) ToXOR() (*chunkenc.XORChunk, error) {
    x := chunkenc.NewXORChunk()
    app, err := x.Appender()
    if err != nil {
        return nil, err
    }
    for _, s := range o.samples {
        app.Append(s.t, s.f)
    }
    return x, nil
}

func (o *OOOChunk) ToXORBetweenTimestamps(mint, maxt int64) (*chunkenc.XORChunk, error) {
    x := chunkenc.NewXORChunk()
    app, err := x.Appender()
    if err != nil {
        return nil, err
// ToEncodedChunks returns chunks with the samples in the OOOChunk.
//
//nolint:revive // unexported-return.
func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error) {
    if len(o.samples) == 0 {
        return nil, nil
    }
    // The most common case is that there will be a single chunk, with the same type of samples in it - this is always true for float samples.
    chks = make([]memChunk, 0, 1)
    var (
        cmint int64
        cmaxt int64
        chunk chunkenc.Chunk
        app   chunkenc.Appender
    )
    prevEncoding := chunkenc.EncNone // Yes we could call the chunk for this, but this is more efficient.
    for _, s := range o.samples {
        if s.t < mint {
            continue
@@ -99,9 +98,77 @@ func (o *OOOChunk) ToXORBetweenTimestamps(mint, maxt int64) (*chunkenc.XORChunk,
        if s.t > maxt {
            break
        }
        app.Append(s.t, s.f)
        encoding := chunkenc.EncXOR
        if s.h != nil {
            encoding = chunkenc.EncHistogram
        } else if s.fh != nil {
            encoding = chunkenc.EncFloatHistogram
        }

        // prevApp is the appender for the previous sample.
        prevApp := app

        if encoding != prevEncoding { // For the first sample, this will always be true as EncNone != EncXOR | EncHistogram | EncFloatHistogram.
            if prevEncoding != chunkenc.EncNone {
                chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
            }
            cmint = s.t
            switch encoding {
            case chunkenc.EncXOR:
                chunk = chunkenc.NewXORChunk()
            case chunkenc.EncHistogram:
                chunk = chunkenc.NewHistogramChunk()
            case chunkenc.EncFloatHistogram:
                chunk = chunkenc.NewFloatHistogramChunk()
            default:
                chunk = chunkenc.NewXORChunk()
            }
            app, err = chunk.Appender()
            if err != nil {
                return
            }
        }
        switch encoding {
        case chunkenc.EncXOR:
            app.Append(s.t, s.f)
        case chunkenc.EncHistogram:
            // Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway.
            prevHApp, _ := prevApp.(*chunkenc.HistogramAppender)
            var (
                newChunk chunkenc.Chunk
                recoded  bool
            )
            newChunk, recoded, app, _ = app.AppendHistogram(prevHApp, s.t, s.h, false)
            if newChunk != nil { // A new chunk was allocated.
                if !recoded {
                    chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
                }
                chunk = newChunk
                cmint = s.t
            }
        case chunkenc.EncFloatHistogram:
            // Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway.
            prevHApp, _ := prevApp.(*chunkenc.FloatHistogramAppender)
            var (
                newChunk chunkenc.Chunk
                recoded  bool
            )
            newChunk, recoded, app, _ = app.AppendFloatHistogram(prevHApp, s.t, s.fh, false)
            if newChunk != nil { // A new chunk was allocated.
                if !recoded {
                    chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
                }
                chunk = newChunk
                cmint = s.t
            }
        }
        cmaxt = s.t
        prevEncoding = encoding
    }
    return x, nil
    if prevEncoding != chunkenc.EncNone {
        chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
    }
    return chks, nil
}

var _ BlockReader = &OOORangeHead{}
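To illustrate the splitting behaviour: ToEncodedChunks walks the sorted samples once and cuts a new chunk whenever the required encoding changes, so a mix of floats and native histograms yields multiple chunks, while an all-float OOOChunk still yields exactly one XOR chunk. A hedged, package-internal sketch (GenerateTestHistogram is assumed from tsdb/tsdbutil; the expected results are what the code above implies, not verified output):

o := &OOOChunk{samples: []sample{
    {t: 100, f: 1.5},
    {t: 200, f: 2.5},
    {t: 300, h: tsdbutil.GenerateTestHistogram(1)},
}}
chks, err := o.ToEncodedChunks(math.MinInt64, math.MaxInt64)
// err == nil, len(chks) == 2:
//   chks[0] holds the XOR-encoded float samples at t=100 and t=200,
//   chks[1] holds the histogram sample at t=300.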

@@ -108,11 +108,19 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
        c := s.ooo.oooHeadChunk
        if c.OverlapsClosedInterval(oh.mint, oh.maxt) && maxMmapRef == 0 {
            ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks))))
            var xor chunkenc.Chunk
            if len(c.chunk.samples) > 0 { // Empty samples happens in tests, at least.
                xor, _ = c.chunk.ToXOR() // Ignoring error because it can't fail.
                chks, err := s.ooo.oooHeadChunk.chunk.ToEncodedChunks(c.minTime, c.maxTime)
                if err != nil {
                    handleChunkWriteError(err)
                    return nil
                }
                for _, chk := range chks {
                    addChunk(c.minTime, c.maxTime, ref, chk.chunk)
                }
            } else {
                var emptyChunk chunkenc.Chunk
                addChunk(c.minTime, c.maxTime, ref, emptyChunk)
            }
            addChunk(c.minTime, c.maxTime, ref, xor)
        }
    }
    for i := len(s.ooo.oooMmappedChunks) - 1; i >= 0; i-- {
@@ -341,14 +349,20 @@ func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead,
            continue
        }

        mmapRef := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper)
        if mmapRef == 0 && len(ms.ooo.oooMmappedChunks) > 0 {
        var lastMmapRef chunks.ChunkDiskMapperRef
        mmapRefs := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper)
        if len(mmapRefs) == 0 && len(ms.ooo.oooMmappedChunks) > 0 {
            // Nothing was m-mapped. So take the mmapRef from the existing slice if it exists.
            mmapRef = ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref
            mmapRefs = []chunks.ChunkDiskMapperRef{ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref}
        }
        seq, off := mmapRef.Unpack()
        if len(mmapRefs) == 0 {
            lastMmapRef = 0
        } else {
            lastMmapRef = mmapRefs[len(mmapRefs)-1]
        }
        seq, off := lastMmapRef.Unpack()
        if seq > lastSeq || (seq == lastSeq && off > lastOff) {
            ch.lastMmapRef, lastSeq, lastOff = mmapRef, seq, off
            ch.lastMmapRef, lastSeq, lastOff = lastMmapRef, seq, off
        }
        if len(ms.ooo.oooMmappedChunks) > 0 {
            ch.postings = append(ch.postings, seriesRef)

@@ -75,7 +75,7 @@ func createFlagRow(flag *kingpin.FlagModel) []string {
        name = fmt.Sprintf(`<code class="text-nowrap">-%c</code>, <code class="text-nowrap">--%s</code>`, flag.Short, flag.Name)
    }

    return []string{name, flag.Help, defaultVal}
    return []string{name, strings.ReplaceAll(flag.Help, "|", `\|`), defaultVal}
}

func writeFlagTable(writer io.Writer, level int, fgm *kingpin.FlagGroupModel) error {
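The escaping matters because flag help strings can legitimately contain "|" (for example when listing alternatives), and an unescaped pipe splits the generated Markdown table cell. A small standalone sketch (the help string is made up for illustration):

package main

import (
    "fmt"
    "strings"
)

func main() {
    help := "Output format. One of: [text|json]"
    fmt.Println("| --format | " + help + " |")                               // pipe splits the cell: broken row
    fmt.Println("| --format | " + strings.ReplaceAll(help, "|", `\|`) + " |") // renders as a single cell
}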

@@ -295,7 +295,7 @@ func NewAPI(
        a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs)
    }
    if otlpEnabled {
        a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap)
        a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap, configFunc)
    }

    return a

@@ -361,6 +361,7 @@ var samplePrometheusCfg = config.Config{
    ScrapeConfigs:      []*config.ScrapeConfig{},
    RemoteWriteConfigs: []*config.RemoteWriteConfig{},
    RemoteReadConfigs:  []*config.RemoteReadConfig{},
    OTLPConfig:         config.OTLPConfig{},
}

var sampleFlagMap = map[string]string{
@@ -33,11 +33,11 @@
    "lru-cache": "^7.18.3"
  },
  "devDependencies": {
    "@codemirror/autocomplete": "^6.16.2",
    "@codemirror/autocomplete": "^6.17.0",
    "@codemirror/language": "^6.10.2",
    "@codemirror/lint": "^6.8.0",
    "@codemirror/lint": "^6.8.1",
    "@codemirror/state": "^6.3.3",
    "@codemirror/view": "^6.22.1",
    "@codemirror/view": "^6.28.3",
    "@lezer/common": "^1.2.1",
    "@lezer/highlight": "^1.2.0",
    "@lezer/lr": "^1.4.1",

55 web/ui/package-lock.json generated

@@ -13,7 +13,7 @@
    ],
    "devDependencies": {
      "@types/jest": "^29.5.12",
      "@types/node": "^20.14.2",
      "@types/node": "^20.14.9",
      "eslint-config-prettier": "^9.1.0",
      "eslint-config-react-app": "^7.0.1",
      "eslint-plugin-prettier": "^4.2.1",
@@ -21,7 +21,7 @@
      "jest-fetch-mock": "^3.0.3",
      "prettier": "^2.8.8",
      "react-scripts": "^5.0.1",
      "ts-jest": "^29.1.4",
      "ts-jest": "^29.2.2",
      "typescript": "^4.9.5"
    },
    "engines": {
@@ -37,11 +37,11 @@
      "lru-cache": "^7.18.3"
    },
    "devDependencies": {
      "@codemirror/autocomplete": "^6.16.2",
      "@codemirror/autocomplete": "^6.17.0",
      "@codemirror/language": "^6.10.2",
      "@codemirror/lint": "^6.8.0",
      "@codemirror/lint": "^6.8.1",
      "@codemirror/state": "^6.3.3",
      "@codemirror/view": "^6.22.1",
      "@codemirror/view": "^6.28.3",
      "@lezer/common": "^1.2.1",
      "@lezer/highlight": "^1.2.0",
      "@lezer/lr": "^1.4.1",
@@ -2027,9 +2027,9 @@
      "license": "MIT"
    },
    "node_modules/@codemirror/autocomplete": {
      "version": "6.16.2",
      "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.16.2.tgz",
      "integrity": "sha512-MjfDrHy0gHKlPWsvSsikhO1+BOh+eBHNgfH1OXs1+DAf30IonQldgMM3kxLDTG9ktE7kDLaA1j/l7KMPA4KNfw==",
      "version": "6.17.0",
      "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.17.0.tgz",
      "integrity": "sha512-fdfj6e6ZxZf8yrkMHUSJJir7OJkHkZKaOZGzLWIYp2PZ3jd+d+UjG8zVPqJF6d3bKxkhvXTPan/UZ1t7Bqm0gA==",
      "dependencies": {
        "@codemirror/language": "^6.0.0",
        "@codemirror/state": "^6.0.0",
@@ -2068,9 +2068,9 @@
      }
    },
    "node_modules/@codemirror/lint": {
      "version": "6.8.0",
      "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.0.tgz",
      "integrity": "sha512-lsFofvaw0lnPRJlQylNsC4IRt/1lI4OD/yYslrSGVndOJfStc58v+8p9dgGiD90ktOfL7OhBWns1ZETYgz0EJA==",
      "version": "6.8.1",
      "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.1.tgz",
      "integrity": "sha512-IZ0Y7S4/bpaunwggW2jYqwLuHj0QtESf5xcROewY6+lDNwZ/NzvR4t+vpYgg9m7V8UXLPYqG+lu3DF470E5Oxg==",
      "dependencies": {
        "@codemirror/state": "^6.0.0",
        "@codemirror/view": "^6.0.0",
@@ -2093,9 +2093,9 @@
      "integrity": "sha512-QkEyUiLhsJoZkbumGZlswmAhA7CBU02Wrz7zvH4SrcifbsqwlXShVXg65f3v/ts57W3dqyamEriMhij1Z3Zz4A=="
    },
    "node_modules/@codemirror/view": {
      "version": "6.27.0",
      "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.27.0.tgz",
      "integrity": "sha512-8kqX1sHbVW1lVzWwrjAbh4dR7eKhV8eIQ952JKaBXOoXE04WncoqCy4DMU701LSrPZ3N2Q4zsTawz7GQ+2mrUw==",
      "version": "6.28.3",
      "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.28.3.tgz",
      "integrity": "sha512-QVqP+ko078/h9yrW+u5grX3rQhC+BkGKADRrlDaJznfPngJOv5zObiVf0+SgAWhL/Yt0nvZ+10rO3L+gU5IbFw==",
      "dependencies": {
        "@codemirror/state": "^6.4.0",
        "style-mod": "^4.1.0",
@@ -4199,9 +4199,9 @@
      "license": "MIT"
    },
    "node_modules/@types/node": {
      "version": "20.14.2",
      "resolved": "https://registry.npmjs.org/@types/node/-/node-20.14.2.tgz",
      "integrity": "sha512-xyu6WAMVwv6AKFLB+e/7ySZVr/0zLCzOa7rSpq6jNwpqOrUbcACDWC+53d4n2QHOnDou0fbIsg8wZu/sxrnI4Q==",
      "version": "20.14.9",
      "resolved": "https://registry.npmjs.org/@types/node/-/node-20.14.9.tgz",
      "integrity": "sha512-06OCtnTXtWOZBJlRApleWndH4JsRVs1pDCc8dLSQp+7PpUpX3ePdHyeNSFTeSe7FtKyQkrlPvHwJOW3SLd8Oyg==",
      "dependencies": {
        "undici-types": "~5.26.4"
      }
@@ -16807,9 +16807,9 @@
      "license": "CC0-1.0"
    },
    "node_modules/sass": {
      "version": "1.77.4",
      "resolved": "https://registry.npmjs.org/sass/-/sass-1.77.4.tgz",
      "integrity": "sha512-vcF3Ckow6g939GMA4PeU7b2K/9FALXk2KF9J87txdHzXbUF9XRQRwSxcAs/fGaTnJeBFd7UoV22j3lzMLdM0Pw==",
      "version": "1.77.6",
      "resolved": "https://registry.npmjs.org/sass/-/sass-1.77.6.tgz",
      "integrity": "sha512-ByXE1oLD79GVq9Ht1PeHWCPMPB8XHpBuz1r85oByKHjZY6qV6rWnQovQzXJXuQ/XyE1Oj3iPk3lo28uzaRA2/Q==",
      "dependencies": {
        "chokidar": ">=3.0.0 <4.0.0",
        "immutable": "^4.0.0",
@@ -18028,12 +18028,13 @@
      "license": "MIT"
    },
    "node_modules/ts-jest": {
      "version": "29.1.4",
      "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.4.tgz",
      "integrity": "sha512-YiHwDhSvCiItoAgsKtoLFCuakDzDsJ1DLDnSouTaTmdOcOwIkSzbLXduaQ6M5DRVhuZC/NYaaZ/mtHbWMv/S6Q==",
      "version": "29.2.2",
      "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.2.2.tgz",
      "integrity": "sha512-sSW7OooaKT34AAngP6k1VS669a0HdLxkQZnlC7T76sckGCokXFnvJ3yRlQZGRTAoV5K19HfSgCiSwWOSIfcYlg==",
      "dev": true,
      "dependencies": {
        "bs-logger": "0.x",
        "ejs": "^3.0.0",
        "fast-json-stable-stringify": "2.x",
        "jest-util": "^29.0.0",
        "json5": "^2.2.3",
@@ -19333,13 +19334,13 @@
      "name": "@prometheus-io/app",
      "version": "0.53.1",
      "dependencies": {
        "@codemirror/autocomplete": "^6.16.2",
        "@codemirror/autocomplete": "^6.17.0",
        "@codemirror/commands": "^6.6.0",
        "@codemirror/language": "^6.10.2",
        "@codemirror/lint": "^6.8.0",
        "@codemirror/lint": "^6.8.1",
        "@codemirror/search": "^6.5.6",
        "@codemirror/state": "^6.3.3",
        "@codemirror/view": "^6.22.1",
        "@codemirror/view": "^6.28.3",
        "@forevolve/bootstrap-dark": "^4.0.2",
        "@fortawesome/fontawesome-svg-core": "6.5.2",
        "@fortawesome/free-solid-svg-icons": "6.5.2",
@@ -19368,7 +19369,7 @@
        "react-test-renderer": "^17.0.2",
        "reactstrap": "^8.10.1",
        "sanitize-html": "^2.13.0",
        "sass": "1.77.4",
        "sass": "1.77.6",
        "tempusdominus-bootstrap-4": "^5.39.2",
        "tempusdominus-core": "^5.19.3"
      },

@@ -17,7 +17,7 @@
  },
  "devDependencies": {
    "@types/jest": "^29.5.12",
    "@types/node": "^20.14.2",
    "@types/node": "^20.14.9",
    "eslint-config-prettier": "^9.1.0",
    "eslint-config-react-app": "^7.0.1",
    "eslint-plugin-prettier": "^4.2.1",
@@ -25,7 +25,7 @@
    "jest-fetch-mock": "^3.0.3",
    "prettier": "^2.8.8",
    "react-scripts": "^5.0.1",
    "ts-jest": "^29.1.4",
    "ts-jest": "^29.2.2",
    "typescript": "^4.9.5"
  },
  "version": "0.53.1"

@@ -3,13 +3,13 @@
  "version": "0.53.1",
  "private": true,
  "dependencies": {
    "@codemirror/autocomplete": "^6.16.2",
    "@codemirror/autocomplete": "^6.17.0",
    "@codemirror/commands": "^6.6.0",
    "@codemirror/language": "^6.10.2",
    "@codemirror/lint": "^6.8.0",
    "@codemirror/lint": "^6.8.1",
    "@codemirror/search": "^6.5.6",
    "@codemirror/state": "^6.3.3",
    "@codemirror/view": "^6.22.1",
    "@codemirror/view": "^6.28.3",
    "@forevolve/bootstrap-dark": "^4.0.2",
    "@fortawesome/fontawesome-svg-core": "6.5.2",
    "@fortawesome/free-solid-svg-icons": "6.5.2",
@@ -38,7 +38,7 @@
    "react-test-renderer": "^17.0.2",
    "reactstrap": "^8.10.1",
    "sanitize-html": "^2.13.0",
    "sass": "1.77.4",
    "sass": "1.77.6",
    "tempusdominus-bootstrap-4": "^5.39.2",
    "tempusdominus-core": "^5.19.3"
  },