// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package remote

import (
	"bufio"
	"bytes"
	"context"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/golang/snappy"
	"github.com/prometheus/client_golang/prometheus"
	config_util "github.com/prometheus/common/config"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/sigv4"
	"github.com/prometheus/common/version"
	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"

	"github.com/prometheus/prometheus/prompb"
	"github.com/prometheus/prometheus/storage/remote/azuread"
)

const maxErrMsgLen = 1024

// UserAgent is the User-Agent header value sent with remote read and write requests.
var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)

var (
	remoteReadQueriesTotal = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "read_queries_total",
			Help:      "The total number of remote read queries.",
		},
		[]string{remoteName, endpoint, "code"},
	)
	remoteReadQueries = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "remote_read_queries",
			Help:      "The number of in-flight remote read queries.",
		},
		[]string{remoteName, endpoint},
	)
	remoteReadQueryDuration = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace:                       namespace,
			Subsystem:                       subsystem,
			Name:                            "read_request_duration_seconds",
			Help:                            "Histogram of the latency for remote read requests.",
			Buckets:                         append(prometheus.DefBuckets, 25, 60),
			NativeHistogramBucketFactor:     1.1,
			NativeHistogramMaxBucketNumber:  100,
			NativeHistogramMinResetDuration: 1 * time.Hour,
		},
		[]string{remoteName, endpoint},
	)
)

func init() {
	prometheus.MustRegister(remoteReadQueriesTotal, remoteReadQueries, remoteReadQueryDuration)
}

// Client allows reading and writing from/to a remote HTTP endpoint.
type Client struct {
	remoteName string // Used to differentiate clients in metrics.
	urlString  string // url.String()
	Client     *http.Client
	timeout    time.Duration

	retryOnRateLimit bool

	readQueries         prometheus.Gauge
	readQueriesTotal    *prometheus.CounterVec
	readQueriesDuration prometheus.Observer
}

// ClientConfig configures a client.
type ClientConfig struct {
	URL              *config_util.URL
	Timeout          model.Duration
	HTTPClientConfig config_util.HTTPClientConfig
	SigV4Config      *sigv4.SigV4Config
	AzureADConfig    *azuread.AzureADConfig
	Headers          map[string]string
	RetryOnRateLimit bool
}
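
// A minimal usage sketch (the endpoint and values are illustrative, not
// defaults); it assumes a previously parsed *url.URL for the remote write
// endpoint:
//
//	u, _ := url.Parse("http://localhost:9201/api/v1/write") // hypothetical endpoint
//	wc, err := NewWriteClient("example", &ClientConfig{
//		URL:     &config_util.URL{URL: u},
//		Timeout: model.Duration(30 * time.Second),
//	})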

// ReadClient uses the SAMPLES method of remote read to read series samples from remote server.
// TODO(bwplotka): Add streamed chunked remote read method as well (https://github.com/prometheus/prometheus/issues/5926).
type ReadClient interface {
	Read(ctx context.Context, query *prompb.Query) (*prompb.QueryResult, error)
}
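
// A minimal sketch of issuing a remote read query (the matcher and time range
// are illustrative; conf is assumed to be a populated *ClientConfig):
//
//	rc, _ := NewReadClient("example", conf)
//	res, err := rc.Read(ctx, &prompb.Query{
//		StartTimestampMs: start,
//		EndTimestampMs:   end,
//		Matchers: []*prompb.LabelMatcher{
//			{Type: prompb.LabelMatcher_EQ, Name: "__name__", Value: "up"},
//		},
//	})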

// NewReadClient creates a new client for remote read.
func NewReadClient(name string, conf *ClientConfig) (ReadClient, error) {
	httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_read_client")
	if err != nil {
		return nil, err
	}

	t := httpClient.Transport
	if len(conf.Headers) > 0 {
		t = newInjectHeadersRoundTripper(conf.Headers, t)
	}
	httpClient.Transport = otelhttp.NewTransport(t)

	return &Client{
		remoteName:          name,
		urlString:           conf.URL.String(),
		Client:              httpClient,
		timeout:             time.Duration(conf.Timeout),
		readQueries:         remoteReadQueries.WithLabelValues(name, conf.URL.String()),
		readQueriesTotal:    remoteReadQueriesTotal.MustCurryWith(prometheus.Labels{remoteName: name, endpoint: conf.URL.String()}),
		readQueriesDuration: remoteReadQueryDuration.WithLabelValues(name, conf.URL.String()),
	}, nil
}

// NewWriteClient creates a new client for remote write.
func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
	httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_write_client")
	if err != nil {
		return nil, err
	}
	t := httpClient.Transport

	if len(conf.Headers) > 0 {
		t = newInjectHeadersRoundTripper(conf.Headers, t)
	}

	if conf.SigV4Config != nil {
		t, err = sigv4.NewSigV4RoundTripper(conf.SigV4Config, t)
		if err != nil {
			return nil, err
		}
	}

	if conf.AzureADConfig != nil {
		t, err = azuread.NewAzureADRoundTripper(conf.AzureADConfig, t)
		if err != nil {
			return nil, err
		}
	}

	httpClient.Transport = otelhttp.NewTransport(t)

	return &Client{
		remoteName:       name,
		urlString:        conf.URL.String(),
		Client:           httpClient,
		retryOnRateLimit: conf.RetryOnRateLimit,
		timeout:          time.Duration(conf.Timeout),
	}, nil
}

// newInjectHeadersRoundTripper wraps an http.RoundTripper so that the given
// headers are set on every outgoing request.
func newInjectHeadersRoundTripper(h map[string]string, underlyingRT http.RoundTripper) *injectHeadersRoundTripper {
	return &injectHeadersRoundTripper{headers: h, RoundTripper: underlyingRT}
}

type injectHeadersRoundTripper struct {
	headers map[string]string
	http.RoundTripper
}

func (t *injectHeadersRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	for key, value := range t.headers {
		req.Header.Set(key, value)
	}
	return t.RoundTripper.RoundTrip(req)
}

// defaultBackoff is used when the server does not supply a Retry-After header.
const defaultBackoff = 0

// RecoverableError wraps an error that the caller may retry after retryAfter.
type RecoverableError struct {
	error
	retryAfter model.Duration
}

// Store sends a batch of samples to the HTTP endpoint; the request is the
// proto-marshalled and snappy-encoded bytes from codec.go.
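//
// A minimal sketch of preparing such a payload (shown for illustration only;
// the queue manager builds it via codec.go, and series is an assumed
// []prompb.TimeSeries):
//
//	data, _ := proto.Marshal(&prompb.WriteRequest{Timeseries: series})
//	err := client.Store(ctx, snappy.Encode(nil, data), 0)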
func (c *Client) Store(ctx context.Context, req []byte, attempt int) error {
	httpReq, err := http.NewRequest("POST", c.urlString, bytes.NewReader(req))
	if err != nil {
		// Errors from NewRequest are from unparsable URLs, so are not
		// recoverable.
		return err
	}

	httpReq.Header.Add("Content-Encoding", "snappy")
	httpReq.Header.Set("Content-Type", "application/x-protobuf")
	httpReq.Header.Set("User-Agent", UserAgent)
	httpReq.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")
	if attempt > 0 {
		httpReq.Header.Set("Retry-Attempt", strconv.Itoa(attempt))
	}

	ctx, cancel := context.WithTimeout(ctx, c.timeout)
	defer cancel()

	ctx, span := otel.Tracer("").Start(ctx, "Remote Store", trace.WithSpanKind(trace.SpanKindClient))
	defer span.End()

	httpResp, err := c.Client.Do(httpReq.WithContext(ctx))
	if err != nil {
		// Errors from Client.Do are from (for example) network errors, so are
		// recoverable.
		return RecoverableError{err, defaultBackoff}
	}
	defer func() {
		io.Copy(io.Discard, httpResp.Body)
		httpResp.Body.Close()
	}()

	if httpResp.StatusCode/100 != 2 {
		scanner := bufio.NewScanner(io.LimitReader(httpResp.Body, maxErrMsgLen))
		line := ""
		if scanner.Scan() {
			line = scanner.Text()
		}
		err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, line)
	}
	if httpResp.StatusCode/100 == 5 ||
		(c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests) {
		return RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))}
	}
	return err
}

// retryAfterDuration returns the duration for the Retry-After header. In case of any errors, it
// returns the defaultBackoff as if the header was never supplied.
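//
// For example (illustrative values), retryAfterDuration("120") yields 2m0s; an
// HTTP-date such as "Wed, 01 Jan 2025 00:00:00 GMT" yields roughly the time
// remaining until that instant; and an unparsable value yields defaultBackoff.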
func retryAfterDuration(t string) model.Duration {
	parsedDuration, err := time.Parse(http.TimeFormat, t)
	if err == nil {
		s := time.Until(parsedDuration).Seconds()
		return model.Duration(s) * model.Duration(time.Second)
	}
	// The duration can be in seconds.
	d, err := strconv.Atoi(t)
	if err != nil {
		return defaultBackoff
	}
	return model.Duration(d) * model.Duration(time.Second)
}

// Name uniquely identifies the client.
func (c Client) Name() string {
	return c.remoteName
}

// Endpoint is the remote read or write endpoint.
func (c Client) Endpoint() string {
	return c.urlString
}

// Read reads from a remote endpoint.
func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryResult, error) {
	c.readQueries.Inc()
	defer c.readQueries.Dec()

	req := &prompb.ReadRequest{
		// TODO: Support batching multiple queries into one read request,
		// as the protobuf interface allows for it.
		Queries: []*prompb.Query{
			query,
		},
	}
	data, err := proto.Marshal(req)
	if err != nil {
		return nil, fmt.Errorf("unable to marshal read request: %w", err)
	}

	compressed := snappy.Encode(nil, data)
	httpReq, err := http.NewRequest("POST", c.urlString, bytes.NewReader(compressed))
	if err != nil {
		return nil, fmt.Errorf("unable to create request: %w", err)
	}
	httpReq.Header.Add("Content-Encoding", "snappy")
	httpReq.Header.Add("Accept-Encoding", "snappy")
	httpReq.Header.Set("Content-Type", "application/x-protobuf")
	httpReq.Header.Set("User-Agent", UserAgent)
	httpReq.Header.Set("X-Prometheus-Remote-Read-Version", "0.1.0")

	ctx, cancel := context.WithTimeout(ctx, c.timeout)
	defer cancel()

	ctx, span := otel.Tracer("").Start(ctx, "Remote Read", trace.WithSpanKind(trace.SpanKindClient))
	defer span.End()

	start := time.Now()
	httpResp, err := c.Client.Do(httpReq.WithContext(ctx))
	if err != nil {
		return nil, fmt.Errorf("error sending request: %w", err)
	}
	defer func() {
		io.Copy(io.Discard, httpResp.Body)
		httpResp.Body.Close()
	}()
	c.readQueriesDuration.Observe(time.Since(start).Seconds())
	c.readQueriesTotal.WithLabelValues(strconv.Itoa(httpResp.StatusCode)).Inc()

	compressed, err = io.ReadAll(httpResp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading response. HTTP status code: %s: %w", httpResp.Status, err)
	}

	if httpResp.StatusCode/100 != 2 {
		return nil, fmt.Errorf("remote server %s returned HTTP status %s: %s", c.urlString, httpResp.Status, strings.TrimSpace(string(compressed)))
	}

	uncompressed, err := snappy.Decode(nil, compressed)
	if err != nil {
		return nil, fmt.Errorf("error reading response: %w", err)
	}

	var resp prompb.ReadResponse
	err = proto.Unmarshal(uncompressed, &resp)
	if err != nil {
		return nil, fmt.Errorf("unable to unmarshal response body: %w", err)
	}

	if len(resp.Results) != len(req.Queries) {
		return nil, fmt.Errorf("responses: want %d, got %d", len(req.Queries), len(resp.Results))
	}

	return resp.Results[0], nil
}