diff --git a/cmd/promtool/metrics.go b/cmd/promtool/metrics.go index 46246b672..3117fd29c 100644 --- a/cmd/promtool/metrics.go +++ b/cmd/promtool/metrics.go @@ -116,7 +116,7 @@ func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]s // Encode the request body into snappy encoding. compressed := snappy.Encode(nil, raw) - err = client.Store(context.Background(), compressed, 0) + err = client.Store(context.Background(), compressed, 0, remote.Version1, "snappy") if err != nil { fmt.Fprintln(os.Stderr, " FAILED:", err) return false diff --git a/storage/remote/client.go b/storage/remote/client.go index 943ba5cd0..09014cea2 100644 --- a/storage/remote/client.go +++ b/storage/remote/client.go @@ -17,6 +17,7 @@ import ( "bufio" "bytes" "context" + "errors" "fmt" "io" "math/rand" @@ -46,6 +47,9 @@ const maxErrMsgLen = 1024 var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version) +var ErrStatusBadRequest = errors.New("HTTP StatusBadRequest") // 400 +var ErrStatusNotAcceptable = errors.New("HTTP StatusNotAcceptable") // 406 + var ( remoteReadQueriesTotal = prometheus.NewCounterVec( prometheus.CounterOpts{ @@ -83,11 +87,12 @@ func init() { // Client allows reading and writing from/to a remote HTTP endpoint. type Client struct { - remoteName string // Used to differentiate clients in metrics. - urlString string // url.String() - rwFormat config.RemoteWriteFormat // For write clients, ignored for read clients. - Client *http.Client - timeout time.Duration + remoteName string // Used to differentiate clients in metrics. + urlString string // url.String() + rwFormat config.RemoteWriteFormat // For write clients, ignored for read clients. + lastRWHeader string + Client *http.Client + timeout time.Duration retryOnRateLimit bool @@ -199,9 +204,53 @@ type RecoverableError struct { retryAfter model.Duration } +// GetProtoVersions issues a HEAD request against the remote write endpoint to see which protocol versions it supports. +func (c *Client) GetProtoVersions(ctx context.Context) (string, error) { + // If we are in Version1 mode then don't even bother + if c.rwFormat == Version1 { + return RemoteWriteVersion1HeaderValue, nil + } + + httpReq, err := http.NewRequest("HEAD", c.urlString, nil) + if err != nil { + // Errors from NewRequest are from unparsable URLs, so are not + // recoverable. + return "", err + } + + // Set the version header to be nice + httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue) + httpReq.Header.Set("User-Agent", UserAgent) + + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + + httpResp, err := c.Client.Do(httpReq.WithContext(ctx)) + if err != nil { + // We don't attempt a retry here + return "", err + } + + // See if we got a header anyway + promHeader := httpResp.Header.Get(RemoteWriteVersionHeader) + + // Only update lastRWHeader if the X-Prometheus-Remote-Write header is not blank + if promHeader != "" { + c.lastRWHeader = promHeader + } + + // Check for an error + if httpResp.StatusCode != 200 { + return promHeader, errors.New(httpResp.Status) + } + + // All ok, return header and no error + return promHeader, nil +} + // Store sends a batch of samples to the HTTP endpoint, the request is the proto marshalled // and encoded bytes from codec.go.
-func (c *Client) Store(ctx context.Context, req []byte, attempt int) error { +func (c *Client) Store(ctx context.Context, req []byte, attempt int, rwFormat config.RemoteWriteFormat, compression string) error { httpReq, err := http.NewRequest("POST", c.urlString, bytes.NewReader(req)) if err != nil { // Errors from NewRequest are from unparsable URLs, so are not @@ -209,15 +258,15 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) error { return err } - httpReq.Header.Add("Content-Encoding", "snappy") + httpReq.Header.Add("Content-Encoding", compression) httpReq.Header.Set("Content-Type", "application/x-protobuf") httpReq.Header.Set("User-Agent", UserAgent) - if c.rwFormat == Version1 { + if rwFormat == Version1 { httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion1HeaderValue) } else { - // Set the right header if we're using v1.1 remote write protocol - httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion2HeaderValue) + // Set the right header if we're using v2.0 remote write protocol + httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue) } if attempt > 0 { @@ -241,7 +290,12 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) error { httpResp.Body.Close() }() - // TODO-RW11: Here is where we need to handle version downgrade on error + // See if we got a X-Prometheus-Remote-Write header in the response + if promHeader := httpResp.Header.Get(RemoteWriteVersionHeader); promHeader != "" { + // Only update lastRWHeader if the X-Prometheus-Remote-Write header is not blank + // (It's blank if it wasn't present, we don't care about that distinction.) + c.lastRWHeader = promHeader + } if httpResp.StatusCode/100 != 2 { scanner := bufio.NewScanner(io.LimitReader(httpResp.Body, maxErrMsgLen)) @@ -249,7 +303,22 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) error { if scanner.Scan() { line = scanner.Text() } - err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, line) + switch httpResp.StatusCode { + case 400: + // Return an unrecoverable error to indicate the 400 + // This then gets passed up the chain so we can react to it properly + // TODO(alexg) Do we want to include the first line of the message? + return ErrStatusBadRequest + case 406: + // Return an unrecoverable error to indicate the 406 + // This then gets passed up the chain so we can react to it properly + // TODO(alexg) Do we want to include the first line of the message? + // TODO(alexg) Do we want to combine these two errors as one, with the statuscode and first line of message in the error? + return ErrStatusNotAcceptable + default: + // We want to end up returning a non-specific error + err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, line) + } } if httpResp.StatusCode/100 == 5 || (c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests) { @@ -284,6 +353,10 @@ func (c Client) Endpoint() string { return c.urlString } +func (c *Client) GetLastRWHeader() string { + return c.lastRWHeader +} + // Read reads from a remote endpoint. 
func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryResult, error) { c.readQueries.Inc() @@ -366,7 +439,11 @@ func NewTestClient(name, url string) WriteClient { return &TestClient{name: name, url: url} } -func (c *TestClient) Store(_ context.Context, req []byte, _ int) error { +func (c *TestClient) GetProtoVersions(_ context.Context) (string, error) { + return "2.0;snappy,0.1.0", nil +} + +func (c *TestClient) Store(_ context.Context, req []byte, _ int, _ config.RemoteWriteFormat, _ string) error { r := rand.Intn(200-100) + 100 time.Sleep(time.Duration(r) * time.Millisecond) return nil @@ -379,3 +456,7 @@ func (c *TestClient) Name() string { func (c *TestClient) Endpoint() string { return c.url } + +func (c *TestClient) GetLastRWHeader() string { + return "2.0;snappy,0.1.0" +} diff --git a/storage/remote/client_test.go b/storage/remote/client_test.go index 2acb8e279..1711f0fdf 100644 --- a/storage/remote/client_test.go +++ b/storage/remote/client_test.go @@ -73,7 +73,7 @@ func TestStoreHTTPErrorHandling(t *testing.T) { c, err := NewWriteClient(hash, conf) require.NoError(t, err) - err = c.Store(context.Background(), []byte{}, 0) + err = c.Store(context.Background(), []byte{}, 0, Version1, "snappy") if test.err != nil { require.EqualError(t, err, test.err.Error()) } else { @@ -133,7 +133,7 @@ func TestClientRetryAfter(t *testing.T) { c := getClient(getClientConfig(serverURL, tc.retryOnRateLimit)) var recErr RecoverableError - err = c.Store(context.Background(), []byte{}, 0) + err = c.Store(context.Background(), []byte{}, 0, Version1, "snappy") require.Equal(t, tc.expectedRecoverable, errors.As(err, &recErr), "Mismatch in expected recoverable error status.") if tc.expectedRecoverable { require.Equal(t, tc.expectedRetryAfter, recErr.retryAfter) @@ -203,7 +203,7 @@ func TestClientHeaders(t *testing.T) { c, err := NewWriteClient("c", conf) require.NoError(t, err) - err = c.Store(context.Background(), []byte{}, 0) + err = c.Store(context.Background(), []byte{}, 0, Version1, "snappy") require.NoError(t, err) require.True(t, called, "The remote server wasn't called") diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go index 750fb612c..608f5cc2b 100644 --- a/storage/remote/codec_test.go +++ b/storage/remote/codec_test.go @@ -559,7 +559,7 @@ func TestMetricTypeToMetricTypeProto(t *testing.T) { } func TestDecodeWriteRequest(t *testing.T) { - buf, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil) + buf, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil, "snappy") require.NoError(t, err) actual, err := DecodeWriteRequest(bytes.NewReader(buf)) @@ -568,7 +568,7 @@ func TestDecodeWriteRequest(t *testing.T) { } func TestDecodeMinWriteRequest(t *testing.T) { - buf, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeRequestMinimizedFixture.Timeseries, writeRequestMinimizedFixture.Symbols, nil, nil, nil) + buf, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeRequestMinimizedFixture.Timeseries, writeRequestMinimizedFixture.Symbols, nil, nil, nil, "snappy") require.NoError(t, err) actual, err := DecodeMinimizedWriteRequestStr(bytes.NewReader(buf)) diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 72cbb7cad..c5b265583 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -16,8 +16,10 @@ package remote import ( "context" "errors" + "fmt" "math" "strconv" + "strings" "sync" "time" @@ -387,11 +389,15 @@ func (m 
*queueManagerMetrics) unregister() { // external timeseries database. type WriteClient interface { // Store stores the given samples in the remote storage. - Store(ctx context.Context, req []byte, retryAttempt int) error + Store(ctx context.Context, req []byte, retryAttempt int, rwFormat config.RemoteWriteFormat, compression string) error // Name uniquely identifies the remote storage. Name() string // Endpoint is the remote read or write endpoint for the storage client. Endpoint() string + // Get the protocol versions supported by the endpoint + GetProtoVersions(ctx context.Context) (string, error) + // Get the last RW header received from the endpoint + GetLastRWHeader() string } const ( @@ -416,8 +422,7 @@ type QueueManager struct { sendNativeHistograms bool watcher *wlog.Watcher metadataWatcher *MetadataWatcher - // experimental feature, new remote write proto format - rwFormat config.RemoteWriteFormat + rwFormat config.RemoteWriteFormat clientMtx sync.RWMutex storeClient WriteClient @@ -490,9 +495,7 @@ func NewQueueManager( storeClient: client, sendExemplars: enableExemplarRemoteWrite, sendNativeHistograms: enableNativeHistogramRemoteWrite, - // TODO: we should eventually set the format via content negotiation, - // so this field would be the desired format, maybe with a fallback? - rwFormat: rwFormat, + rwFormat: rwFormat, seriesLabels: make(map[chunks.HeadSeriesRef]labels.Labels), seriesMetadata: make(map[chunks.HeadSeriesRef]*metadata.Metadata), @@ -572,7 +575,11 @@ func (t *QueueManager) AppendWatcherMetadata(ctx context.Context, metadata []scr func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []prompb.MetricMetadata, pBuf *proto.Buffer) error { // Build the WriteRequest with no samples. - req, _, _, err := buildWriteRequest(t.logger, nil, metadata, pBuf, nil, nil) + + // Get compression to use from content negotiation based on last header seen (defaults to snappy) + compression, _ := negotiateRWProto(t.rwFormat, t.storeClient.GetLastRWHeader()) + + req, _, _, err := buildWriteRequest(t.logger, nil, metadata, pBuf, nil, nil, compression) if err != nil { return err } @@ -595,7 +602,7 @@ func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []p } begin := time.Now() - err := t.storeClient.Store(ctx, req, try) + err := t.storeClient.Store(ctx, req, try, Version1, compression) t.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) if err != nil { @@ -1491,6 +1498,41 @@ func (q *queue) newBatch(capacity int) []timeSeries { return make([]timeSeries, 0, capacity) } +func negotiateRWProto(rwFormat config.RemoteWriteFormat, lastHeaderSeen string) (string, config.RemoteWriteFormat) { + if rwFormat == Version1 { + // If we're only handling Version1 then all we can do is that with snappy compression + return "snappy", Version1 + } + if rwFormat != Version2 { + // If we get here then someone has added a new RemoteWriteFormat value but hasn't + // fixed this function to handle it + // panic! 
+ panic(fmt.Sprintf("Unhandled RemoteWriteFormat %q", rwFormat)) + } + if lastHeaderSeen == "" { + // We haven't had a valid header, so we just default to 0.1.0/snappy + return "snappy", Version1 + } + // We can currently handle: + // "2.0;snappy" + // "0.1.0" - implicit compression of snappy + // lastHeaderSeen should contain a list of tuples + // If we find a match to something we can handle then we can return that + for _, tuple := range strings.Split(lastHeaderSeen, ",") { + // Remove spaces from the tuple + curr := strings.ReplaceAll(tuple, " ", "") + switch curr { + case "2.0;snappy": + return "snappy", Version2 + case "0.1.0": + return "snappy", Version1 + } + } + + // Otherwise we have to default to "0.1.0" + return "snappy", Version1 +} + func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) { defer func() { if s.running.Dec() == 0 { @@ -1541,6 +1583,26 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) { } defer stop() + attemptBatchSend := func(batch []timeSeries, rwFormat config.RemoteWriteFormat, compression string, timer bool) error { + switch rwFormat { + case Version1: + nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms) + n := nPendingSamples + nPendingExemplars + nPendingHistograms + if timer { + level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples, + "exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms) + } + return s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf, compression) + case Version2: + nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata := populateV2TimeSeries(&symbolTable, batch, pendingMinStrData, s.qm.sendExemplars, s.qm.sendNativeHistograms) + n := nPendingSamples + nPendingExemplars + nPendingHistograms + err := s.sendV2Samples(ctx, pendingMinStrData[:n], symbolTable.LabelsStrings(), nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata, &pBufRaw, &buf, compression) + symbolTable.clear() + return err + } + return nil + } + for { select { case <-ctx.Done(): @@ -1564,16 +1626,20 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) { if !ok { return } - switch s.qm.rwFormat { - case Version1: - nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms) - n := nPendingSamples + nPendingExemplars + nPendingHistograms - s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf) - case Version2: - nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata := populateV2TimeSeries(&symbolTable, batch, pendingMinStrData, s.qm.sendExemplars, s.qm.sendNativeHistograms) - n := nPendingSamples + nPendingExemplars + nPendingHistograms - s.sendV2Samples(ctx, pendingMinStrData[:n], symbolTable.LabelsStrings(), nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata, &pBufRaw, &buf) - symbolTable.clear() + // Resend logic on 406 + // ErrStatusNotAcceptable is a new error defined in client.go + + // Work out what version to send based on the last header seen and the QM's rwFormat setting + // TODO(alexg) - see comments below about retry/renegotiate design + for attemptNos := 1; attemptNos <= 3; attemptNos++ { + lastHeaderSeen := s.qm.storeClient.GetLastRWHeader() + 
compression, rwFormat := negotiateRWProto(s.qm.rwFormat, lastHeaderSeen) + sendErr := attemptBatchSend(batch, rwFormat, compression, false) + if sendErr == nil || !(errors.Is(sendErr, ErrStatusNotAcceptable) || errors.Is(sendErr, ErrStatusBadRequest)) { + // It wasn't an error, or wasn't a 406 or 400 so we can just stop trying + break + } + // If we get either of the two errors (406, 400) we loop and re-negotiate } queue.ReturnForReuse(batch) @@ -1584,19 +1650,23 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) { case <-timer.C: batch := queue.Batch() if len(batch) > 0 { - switch s.qm.rwFormat { - case Version1: - nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms) - n := nPendingSamples + nPendingExemplars + nPendingHistograms - s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf) - level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples, - "exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms) - case Version2: - nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata := populateV2TimeSeries(&symbolTable, batch, pendingMinStrData, s.qm.sendExemplars, s.qm.sendNativeHistograms) - n := nPendingSamples + nPendingExemplars + nPendingHistograms - s.sendV2Samples(ctx, pendingMinStrData[:n], symbolTable.LabelsStrings(), nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata, &pBufRaw, &buf) - symbolTable.clear() + for attemptNos := 1; attemptNos <= 3; attemptNos++ { + // Work out what version to send based on the last header seen and the QM's rwFormat setting + lastHeaderSeen := s.qm.storeClient.GetLastRWHeader() + compression, rwFormat := negotiateRWProto(s.qm.rwFormat, lastHeaderSeen) + sendErr := attemptBatchSend(batch, rwFormat, compression, true) + if sendErr == nil || !(errors.Is(sendErr, ErrStatusNotAcceptable) || errors.Is(sendErr, ErrStatusBadRequest)) { + // It wasn't an error, or wasn't a 406 or 400 so we can just stop trying + break + } + // If we get either of the two errors (406, 400) we loop and re-negotiate } + // TODO(alexg) - the question here is whether we use the 3rd attempt to ensure we + // Consider a server that erroneously reports it can handle "0.2.0/snappy" even in the 406/400 errors when that data is sent in that format + // Q: Do we always try downgrading to 1.0 at least once? + // Q: Do we limit our attempts to only try a particular protocol/encoding tuple once? + // Q: Is 3 a suitable limit? 
+ // TODO(alexg) - add retry/renegotiate metrics here } queue.ReturnForReuse(batch) timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline)) @@ -1645,16 +1715,22 @@ func populateTimeSeries(batch []timeSeries, pendingData []prompb.TimeSeries, sen return nPendingSamples, nPendingExemplars, nPendingHistograms } -func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) { +func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte, compression string) error { begin := time.Now() - err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, 0, pBuf, buf) + err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, 0, pBuf, buf, compression) s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, 0, time.Since(begin)) + + // Return the error in case it is a 406 and we need to reformat the data + return err } -func (s *shards) sendV2Samples(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte) { +func (s *shards) sendV2Samples(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, compression string) error { begin := time.Now() - err := s.sendV2SamplesWithBackoff(ctx, samples, labels, sampleCount, exemplarCount, histogramCount, metadataCount, pBuf, buf) + err := s.sendV2SamplesWithBackoff(ctx, samples, labels, sampleCount, exemplarCount, histogramCount, metadataCount, pBuf, buf, compression) s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, metadataCount, time.Since(begin)) + + // Return the error in case it is a 406 and we need to reformat the data + return err } func (s *shards) updateMetrics(ctx context.Context, err error, sampleCount, exemplarCount, histogramCount, metadataCount int, duration time.Duration) { @@ -1681,9 +1757,9 @@ func (s *shards) updateMetrics(ctx context.Context, err error, sampleCount, exem } // sendSamples to the remote storage with backoff for recoverable errors. -func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf *proto.Buffer, buf *[]byte) error { +func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf *proto.Buffer, buf *[]byte, compression string) error { // Build the WriteRequest with no metadata. 
- req, highest, lowest, err := buildWriteRequest(s.qm.logger, samples, nil, pBuf, buf, nil) + req, highest, lowest, err := buildWriteRequest(s.qm.logger, samples, nil, pBuf, buf, nil, compression) s.qm.buildRequestLimitTimestamp.Store(lowest) if err != nil { // Failing to build the write request is non-recoverable, since it will @@ -1709,6 +1785,7 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti pBuf, buf, isTimeSeriesOldFilter(s.qm.metrics, currentTime, time.Duration(s.qm.cfg.SampleAgeLimit)), + compression, ) s.qm.buildRequestLimitTimestamp.Store(lowest) if err != nil { @@ -1740,7 +1817,7 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount)) s.qm.metrics.histogramsTotal.Add(float64(histogramCount)) s.qm.metrics.metadataTotal.Add(float64(metadataCount)) - err := s.qm.client().Store(ctx, *buf, try) + err := s.qm.client().Store(ctx, *buf, try, Version1, compression) s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) if err != nil { @@ -1771,9 +1848,9 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti } // sendV2Samples to the remote storage with backoff for recoverable errors. -func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte) error { +func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, compression string) error { // Build the WriteRequest with no metadata. - req, highest, lowest, err := buildV2WriteRequest(s.qm.logger, samples, labels, pBuf, buf, nil) + req, highest, lowest, err := buildV2WriteRequest(s.qm.logger, samples, labels, pBuf, buf, nil, compression) s.qm.buildRequestLimitTimestamp.Store(lowest) if err != nil { // Failing to build the write request is non-recoverable, since it will @@ -1799,6 +1876,7 @@ func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2 pBuf, buf, isV2TimeSeriesOldFilter(s.qm.metrics, currentTime, time.Duration(s.qm.cfg.SampleAgeLimit)), + compression, ) s.qm.buildRequestLimitTimestamp.Store(lowest) if err != nil { @@ -1830,7 +1908,7 @@ func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2 s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount)) s.qm.metrics.histogramsTotal.Add(float64(histogramCount)) s.qm.metrics.metadataTotal.Add(float64(metadataCount)) - err := s.qm.client().Store(ctx, *buf, try) + err := s.qm.client().Store(ctx, *buf, try, Version2, compression) s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) if err != nil { @@ -2017,7 +2095,7 @@ func buildTimeSeries(timeSeries []prompb.TimeSeries, filter func(prompb.TimeSeri return highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms } -func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf *[]byte, filter func(prompb.TimeSeries) bool) ([]byte, int64, int64, error) { +func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf *[]byte, filter func(prompb.TimeSeries) bool, compression string) ([]byte, int64, int64, error) { highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms := 
buildTimeSeries(timeSeries, filter) @@ -2047,10 +2125,18 @@ func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metada } else { buf = &[]byte{} } - compressed := snappy.Encode(*buf, pBuf.Bytes()) - if n := snappy.MaxEncodedLen(len(pBuf.Bytes())); n > len(*buf) { - // grow the buffer for the next time - *buf = make([]byte, n) + + var compressed []byte + + switch compression { + case "snappy": + compressed = snappy.Encode(*buf, pBuf.Bytes()) + if n := snappy.MaxEncodedLen(len(pBuf.Bytes())); n > len(*buf) { + // grow the buffer for the next time + *buf = make([]byte, n) + } + default: + return nil, highest, lowest, fmt.Errorf("Unknown compression scheme [%s]", compression) } return compressed, highest, lowest, nil } @@ -2087,7 +2173,7 @@ func (r *rwSymbolTable) clear() { } } -func buildV2WriteRequest(logger log.Logger, samples []writev2.TimeSeries, labels []string, pBuf, buf *[]byte, filter func(writev2.TimeSeries) bool) ([]byte, int64, int64, error) { +func buildV2WriteRequest(logger log.Logger, samples []writev2.TimeSeries, labels []string, pBuf, buf *[]byte, filter func(writev2.TimeSeries) bool, compression string) ([]byte, int64, int64, error) { highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms := buildV2TimeSeries(samples, filter) if droppedSamples > 0 || droppedExemplars > 0 || droppedHistograms > 0 { @@ -2117,10 +2203,17 @@ func buildV2WriteRequest(logger log.Logger, samples []writev2.TimeSeries, labels buf = &[]byte{} } - compressed := snappy.Encode(*buf, data) - if n := snappy.MaxEncodedLen(len(data)); n > len(*buf) { - // grow the buffer for the next time - *buf = make([]byte, n) + var compressed []byte + + switch compression { + case "snappy": + compressed = snappy.Encode(*buf, data) + if n := snappy.MaxEncodedLen(len(data)); n > len(*buf) { + // grow the buffer for the next time + *buf = make([]byte, n) + } + default: + return nil, highest, lowest, fmt.Errorf("Unknown compression scheme [%s]", compression) } return compressed, highest, lowest, nil diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index dcf4a8302..7fac78916 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -63,6 +63,136 @@ func newHighestTimestampMetric() *maxTimestamp { } } +type contentNegotiationStep struct { + lastRWHeader string + compression string + behaviour error // or nil + attemptString string +} + +func TestContentNegotiation(t *testing.T) { + testcases := []struct { + name string + success bool + qmRwFormat config.RemoteWriteFormat + rwFormat config.RemoteWriteFormat + steps []contentNegotiationStep + }{ + // Test a simple case where the v2 request we send is processed first time + {success: true, name: "v2 happy path", qmRwFormat: Version2, rwFormat: Version2, steps: []contentNegotiationStep{ + {lastRWHeader: "2.0;snappy,0.1.0", compression: "snappy", behaviour: nil, attemptString: "0,1,snappy,ok"}, + }, + }, + // Test a simple case where the v1 request we send is processed first time + {success: true, name: "v1 happy path", qmRwFormat: Version1, rwFormat: Version1, steps: []contentNegotiationStep{ + {lastRWHeader: "0.1.0", compression: "snappy", behaviour: nil, attemptString: "0,0,snappy,ok"}, + }, + }, + // Test a case where the v1 request has a temporary delay but goes through on retry + // There is no content re-negotiation between first and retry attempts + {success: true, name: "v1 happy path with one 5xx retry", qmRwFormat: Version1, rwFormat: Version1, 
steps: []contentNegotiationStep{ + {lastRWHeader: "0.1.0", compression: "snappy", behaviour: RecoverableError{fmt.Errorf("Pretend 500"), 1}, attemptString: "0,0,snappy,Pretend 500"}, + {lastRWHeader: "0.1.0", compression: "snappy", behaviour: nil, attemptString: "1,0,snappy,ok"}, + }, + }, + // Repeat the above test but with v2. The request has a temporary delay but goes through on retry + // There is no content re-negotiation between first and retry attempts + {success: true, name: "v2 happy path with one 5xx retry", qmRwFormat: Version2, rwFormat: Version2, steps: []contentNegotiationStep{ + {lastRWHeader: "2.0;snappy,0.1.0", compression: "snappy", behaviour: RecoverableError{fmt.Errorf("Pretend 500"), 1}, attemptString: "0,1,snappy,Pretend 500"}, + {lastRWHeader: "2.0;snappy,0.1.0", compression: "snappy", behaviour: nil, attemptString: "1,1,snappy,ok"}, + }, + }, + // Now test where the server suddenly stops speaking 2.0 and we need to downgrade + {success: true, name: "v2 request to v2 server that has downgraded via 406", qmRwFormat: Version2, rwFormat: Version2, steps: []contentNegotiationStep{ + {lastRWHeader: "2.0;snappy,0.1.0", compression: "snappy", behaviour: ErrStatusNotAcceptable, attemptString: "0,1,snappy,HTTP StatusNotAcceptable"}, + {lastRWHeader: "0.1.0", compression: "snappy", behaviour: nil, attemptString: "0,0,snappy,ok"}, + }, + }, + // Now test where the server suddenly stops speaking 2.0 and we need to downgrade because it returns a 400 + {success: true, name: "v2 request to v2 server that has downgraded via 400", qmRwFormat: Version2, rwFormat: Version2, steps: []contentNegotiationStep{ + {lastRWHeader: "2.0;snappy,0.1.0", compression: "snappy", behaviour: ErrStatusBadRequest, attemptString: "0,1,snappy,HTTP StatusBadRequest"}, + {lastRWHeader: "0.1.0", compression: "snappy", behaviour: nil, attemptString: "0,0,snappy,ok"}, + }, + }, + // Now test where the server flip flops between "2.0;snappy" and "0.1.0" only + {success: false, name: "flip flopping", qmRwFormat: Version2, rwFormat: Version2, steps: []contentNegotiationStep{ + {lastRWHeader: "2.0;snappy", compression: "snappy", behaviour: ErrStatusNotAcceptable, attemptString: "0,1,snappy,HTTP StatusNotAcceptable"}, + {lastRWHeader: "0.1.0", compression: "snappy", behaviour: ErrStatusNotAcceptable, attemptString: "0,0,snappy,HTTP StatusNotAcceptable"}, + {lastRWHeader: "2.0;snappy", compression: "snappy", behaviour: ErrStatusNotAcceptable, attemptString: "0,1,snappy,HTTP StatusNotAcceptable"}, + // There's no 4th attempt as we do a maximum of 3 sending attempts (not counting retries) + }, + }, + } + + queueConfig := config.DefaultQueueConfig + queueConfig.BatchSendDeadline = model.Duration(100 * time.Millisecond) + queueConfig.MaxShards = 1 + + // We need to set URL's so that metric creation doesn't panic. + writeConfig := baseRemoteWriteConfig("http://test-storage.com") + writeConfig.QueueConfig = queueConfig + writeConfig.SendExemplars = true // ALEXG - need? + writeConfig.SendNativeHistograms = true // ALEXG - need? + + conf := &config.Config{ + GlobalConfig: config.DefaultGlobalConfig, + RemoteWriteConfigs: []*config.RemoteWriteConfig{ + writeConfig, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + dir := t.TempDir() + s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, true) + defer s.Close() + + var ( + series []record.RefSeries + metadata []record.RefMetadata + samples []record.RefSample + ) + + // Generates same series in both cases. 
+ samples, series = createTimeseries(1, 1) + metadata = createSeriesMetadata(series) + + // Apply new config. + queueConfig.Capacity = len(samples) + queueConfig.MaxSamplesPerSend = len(samples) + // For now we only ever have a single rw config in this test. + conf.RemoteWriteConfigs[0].ProtocolVersion = tc.qmRwFormat + require.NoError(t, s.ApplyConfig(conf)) + hash, err := toHash(writeConfig) + require.NoError(t, err) + qm := s.rws.queues[hash] + + c := NewTestWriteClient(tc.rwFormat) + c.setSteps(tc.steps) // set expected behaviour + qm.SetClient(c) + + qm.StoreSeries(series, 0) + qm.StoreMetadata(metadata) + + // Did we expect some data back? + if tc.success { + c.expectSamples(samples, series) + } + qm.Append(samples) + if !tc.success { + // We just need to sleep for a bit to give it time to run + time.Sleep(2 * time.Second) + } else { + c.waitForExpectedData(t, 2*time.Second) + } + require.Equal(t, len(c.sendAttempts), len(tc.steps)) + for i, s := range c.sendAttempts { + require.Equal(t, s, tc.steps[i].attemptString) + } + }) + } +} + func TestSampleDelivery(t *testing.T) { testcases := []struct { name string @@ -78,7 +208,7 @@ func TestSampleDelivery(t *testing.T) { {samples: false, exemplars: false, histograms: true, floatHistograms: false, name: "histograms only"}, {samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "float histograms only"}, - // TODO: update some portion of this test to check for the 2.0 metadata + // TODO(alexg): update some portion of this test to check for the 2.0 metadata {samples: true, exemplars: false, histograms: false, floatHistograms: false, name: "samples only", rwFormat: Version2}, {samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "samples, exemplars, and histograms", rwFormat: Version2}, {samples: false, exemplars: true, histograms: false, floatHistograms: false, name: "exemplars only", rwFormat: Version2}, @@ -140,8 +270,6 @@ func TestSampleDelivery(t *testing.T) { // Apply new config. queueConfig.Capacity = len(samples) queueConfig.MaxSamplesPerSend = len(samples) / 2 - // For now we only ever have a single rw config in this test. - conf.RemoteWriteConfigs[0].ProtocolVersion = tc.rwFormat require.NoError(t, s.ApplyConfig(conf)) hash, err := toHash(writeConfig) require.NoError(t, err) @@ -233,7 +361,7 @@ func (c *perRequestWriteClient) expectedData(t testing.TB) { } } -func (c *perRequestWriteClient) Store(ctx context.Context, req []byte, r int) error { +func (c *perRequestWriteClient) Store(ctx context.Context, req []byte, r int, rwFormat config.RemoteWriteFormat, compression string) error { c.mtx.Lock() defer c.mtx.Unlock() defer func() { c.i++ }() @@ -241,7 +369,7 @@ func (c *perRequestWriteClient) Store(ctx context.Context, req []byte, r int) er return nil } - if err := c.TestWriteClient.Store(ctx, req, r); err != nil { + if err := c.TestWriteClient.Store(ctx, req, r, rwFormat, compression); err != nil { return err } @@ -271,7 +399,7 @@ func (c *perRequestWriteClient) Store(ctx context.Context, req []byte, r int) er } c.requests[c.i].expectSamples(c.expectedRequestSamples[expReqSampleToUse], c.expectedSeries) c.expectedRequestSamples = append(c.expectedRequestSamples[:expReqSampleToUse], c.expectedRequestSamples[expReqSampleToUse+1:]...) 
- return c.requests[c.i].Store(ctx, req, r) + return c.requests[c.i].Store(ctx, req, r, rwFormat, compression) } func testDefaultQueueConfig() config.QueueConfig { @@ -362,7 +490,7 @@ func TestMetadataDelivery(t *testing.T) { mcfg := config.DefaultMetadataConfig metrics := newQueueManagerMetrics(nil, "", "") - m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, 0) + m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, Version1) m.Start() defer m.Stop() @@ -970,6 +1098,10 @@ type TestWriteClient struct { mtx sync.Mutex buf []byte rwFormat config.RemoteWriteFormat + sendAttempts []string + steps []contentNegotiationStep + currstep int + retry bool } func NewTestWriteClient(rwFormat config.RemoteWriteFormat) *TestWriteClient { @@ -981,6 +1113,12 @@ func NewTestWriteClient(rwFormat config.RemoteWriteFormat) *TestWriteClient { } } +func (c *TestWriteClient) setSteps(steps []contentNegotiationStep) { + c.steps = steps + c.currstep = -1 // incremented by GetLastRWHeader() + c.retry = false +} + func (c *TestWriteClient) expectSamples(ss []record.RefSample, series []record.RefSeries) { c.mtx.Lock() defer c.mtx.Unlock() @@ -1087,7 +1225,7 @@ func (c *TestWriteClient) waitForExpectedData(tb testing.TB, timeout time.Durati } } -func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error { +func (c *TestWriteClient) Store(_ context.Context, req []byte, attemptNos int, rwFormat config.RemoteWriteFormat, compression string) error { c.mtx.Lock() defer c.mtx.Unlock() // nil buffers are ok for snappy, ignore cast error. 
@@ -1100,8 +1238,23 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error { return err } + attemptString := fmt.Sprintf("%d,%d,%s", attemptNos, rwFormat, compression) + + if attemptNos > 0 { + // If this is a second attempt then we need to bump to the next step otherwise we loop + c.currstep++ + } + + // Check if we've been told to return something for this config + if len(c.steps) > 0 { + if err = c.steps[c.currstep].behaviour; err != nil { + c.sendAttempts = append(c.sendAttempts, attemptString+","+fmt.Sprintf("%s", err)) + return err + } + } + var reqProto *prompb.WriteRequest - switch c.rwFormat { + switch rwFormat { case Version1: reqProto = &prompb.WriteRequest{} err = proto.Unmarshal(reqBuf, reqProto) @@ -1114,6 +1267,7 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error { } if err != nil { + c.sendAttempts = append(c.sendAttempts, attemptString+","+fmt.Sprintf("%s", err)) return err } @@ -1139,6 +1293,7 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error { } c.writesReceived++ + c.sendAttempts = append(c.sendAttempts, attemptString+",ok") return nil } @@ -1150,6 +1305,18 @@ func (c *TestWriteClient) Endpoint() string { return "http://test-remote.com/1234" } +func (c *TestWriteClient) GetProtoVersions(_ context.Context) (string, error) { + return "2.0;snappy,0.1.0", nil +} + +func (c *TestWriteClient) GetLastRWHeader() string { + c.currstep++ + if len(c.steps) > 0 { + return c.steps[c.currstep].lastRWHeader + } + return "2.0;snappy,0.1.0" +} + // TestBlockingWriteClient is a queue_manager WriteClient which will block // on any calls to Store(), until the request's Context is cancelled, at which // point the `numCalls` property will contain a count of how many times Store() @@ -1162,7 +1329,7 @@ func NewTestBlockedWriteClient() *TestBlockingWriteClient { return &TestBlockingWriteClient{} } -func (c *TestBlockingWriteClient) Store(ctx context.Context, _ []byte, _ int) error { +func (c *TestBlockingWriteClient) Store(ctx context.Context, _ []byte, _ int, _ config.RemoteWriteFormat, _ string) error { c.numCalls.Inc() <-ctx.Done() return nil @@ -1180,13 +1347,27 @@ func (c *TestBlockingWriteClient) Endpoint() string { return "http://test-remote-blocking.com/1234" } +func (c *TestBlockingWriteClient) GetProtoVersions(_ context.Context) (string, error) { + return "2.0;snappy,0.1.0", nil +} + +func (c *TestBlockingWriteClient) GetLastRWHeader() string { + return "2.0;snappy,0.1.0" +} + // For benchmarking the send and not the receive side. type NopWriteClient struct{} -func NewNopWriteClient() *NopWriteClient { return &NopWriteClient{} } -func (c *NopWriteClient) Store(context.Context, []byte, int) error { return nil } -func (c *NopWriteClient) Name() string { return "nopwriteclient" } -func (c *NopWriteClient) Endpoint() string { return "http://test-remote.com/1234" } +func NewNopWriteClient() *NopWriteClient { return &NopWriteClient{} } +func (c *NopWriteClient) Store(context.Context, []byte, int, config.RemoteWriteFormat, string) error { + return nil +} +func (c *NopWriteClient) Name() string { return "nopwriteclient" } +func (c *NopWriteClient) Endpoint() string { return "http://test-remote.com/1234" } +func (c *NopWriteClient) GetProtoVersions(_ context.Context) (string, error) { + return "2.0;snappy,0.1.0", nil +} +func (c *NopWriteClient) GetLastRWHeader() string { return "2.0;snappy,0.1.0" } // Extra labels to make a more realistic workload - taken from Kubernetes' embedded cAdvisor metrics. 
var extraLabels []labels.Label = []labels.Label{ @@ -1777,14 +1958,14 @@ func BenchmarkBuildWriteRequest(b *testing.B) { // Warmup buffers for i := 0; i < 10; i++ { populateTimeSeries(batch, seriesBuff, true, true) - buildWriteRequest(noopLogger, seriesBuff, nil, pBuf, &buff, nil) + buildWriteRequest(noopLogger, seriesBuff, nil, pBuf, &buff, nil, "snappy") } b.ResetTimer() totalSize := 0 for i := 0; i < b.N; i++ { populateTimeSeries(batch, seriesBuff, true, true) - req, _, _, err := buildWriteRequest(noopLogger, seriesBuff, nil, pBuf, &buff, nil) + req, _, _, err := buildWriteRequest(noopLogger, seriesBuff, nil, pBuf, &buff, nil, "snappy") if err != nil { b.Fatal(err) } @@ -1833,7 +2014,7 @@ func BenchmarkBuildMinimizedWriteRequest(b *testing.B) { // Warmup buffers for i := 0; i < 10; i++ { populateV2TimeSeries(&symbolTable, tc.batch, seriesBuff, true, true) - buildV2WriteRequest(noopLogger, seriesBuff, symbolTable.LabelsStrings(), &pBuf, &buff, nil) + buildV2WriteRequest(noopLogger, seriesBuff, symbolTable.LabelsStrings(), &pBuf, &buff, nil, "snappy") } b.Run(fmt.Sprintf("%d-instances", len(tc.batch)), func(b *testing.B) { @@ -1841,7 +2022,7 @@ func BenchmarkBuildMinimizedWriteRequest(b *testing.B) { for j := 0; j < b.N; j++ { populateV2TimeSeries(&symbolTable, tc.batch, seriesBuff, true, true) b.ResetTimer() - req, _, _, err := buildV2WriteRequest(noopLogger, seriesBuff, symbolTable.LabelsStrings(), &pBuf, &buff, nil) + req, _, _, err := buildV2WriteRequest(noopLogger, seriesBuff, symbolTable.LabelsStrings(), &pBuf, &buff, nil, "snappy") if err != nil { b.Fatal(err) } @@ -1859,7 +2040,7 @@ func TestDropOldTimeSeries(t *testing.T) { nSamples := config.DefaultQueueConfig.Capacity * size samples, newSamples, series := createTimeseriesWithOldSamples(nSamples, nSeries) - // TODO: test with new version + // TODO(alexg): test with new version c := NewTestWriteClient(Version1) c.expectSamples(newSamples, series) diff --git a/storage/remote/write.go b/storage/remote/write.go index 8d584530f..88a71cce4 100644 --- a/storage/remote/write.go +++ b/storage/remote/write.go @@ -192,6 +192,24 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error { continue } + // Work out what protocol and compression to use for this endpoint + // Default to Remote Write Version1 + rwFormat := Version1 + switch rwConf.ProtocolVersion { + case Version1: + // We use the standard value as there's no negotiation to be had + case Version2: + // If this newer remote write format is enabled then we need to probe the remote server + // to work out the desired protocol version and compressions + // The value of the header is kept in the client so no need to see it here + rwFormat = Version2 + _, err := c.GetProtoVersions(context.Background()) // TODO(alexg) - better ctx to pass? + if err != nil { + // TODO(alexg) - Log an error based on this? + // TODO(alexg) - if we get 405 (MethodNotAllowed) then we should default to 1.0 (and downgrade rwFormat)? + } + } + // Redacted to remove any passwords in the URL (that are // technically accepted but not recommended) since this is // only used for metric labels. @@ -214,7 +232,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error { rws.scraper, rwConf.SendExemplars, rwConf.SendNativeHistograms, - rwConf.ProtocolVersion, + rwFormat, ) // Keep track of which queues are new so we know which to start. 
newHashes = append(newHashes, hash) diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index fe7afbfa3..7ace752e5 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -17,11 +17,16 @@ import ( "context" "errors" "fmt" + "io" "net/http" + "strings" "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/gogo/protobuf/proto" + "github.com/golang/snappy" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/config" @@ -34,11 +39,68 @@ import ( ) const ( - RemoteWriteVersionHeader = "X-Prometheus-Remote-Write-Version" - RemoteWriteVersion1HeaderValue = "0.1.0" - RemoteWriteVersion2HeaderValue = "2.0" + RemoteWriteVersionHeader = "X-Prometheus-Remote-Write-Version" + RemoteWriteVersion1HeaderValue = "0.1.0" + RemoteWriteVersion20HeaderValue = "2.0" ) +func rwHeaderNameValues(rwFormat config.RemoteWriteFormat) map[string]string { + // Return the correct remote write header name/values based on provided rwFormat + ret := make(map[string]string, 1) + + switch rwFormat { + case Version1: + ret[RemoteWriteVersionHeader] = RemoteWriteVersion1HeaderValue + case Version2: + // We need to add the supported protocol definitions in order: + tuples := make([]string, 0, 2) + // Add 2.0;snappy + tuples = append(tuples, RemoteWriteVersion20HeaderValue+";snappy") + // Add default 0.1.0 + tuples = append(tuples, RemoteWriteVersion1HeaderValue) + ret[RemoteWriteVersionHeader] = strings.Join(tuples, ",") + } + return ret +} + +type writeHeadHandler struct { + logger log.Logger + + remoteWrite20HeadRequests prometheus.Counter + + // Experimental feature, new remote write proto format + // The handler will accept the new format, but it can still accept the old one + rwFormat config.RemoteWriteFormat +} + +func NewWriteHeadHandler(logger log.Logger, reg prometheus.Registerer, rwFormat config.RemoteWriteFormat) http.Handler { + h := &writeHeadHandler{ + logger: logger, + rwFormat: rwFormat, + remoteWrite20HeadRequests: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "remote_write_20_head_requests", + Help: "The number of remote write 2.0 head requests.", + }), + } + if reg != nil { + reg.MustRegister(h.remoteWrite20HeadRequests) + } + return h +} + +func (h *writeHeadHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // Send a response to the HEAD request based on the format supported + + // Add appropriate header values for the specific rwFormat + for hName, hValue := range rwHeaderNameValues(h.rwFormat) { + w.Header().Set(hName, hValue) + } + + w.WriteHeader(http.StatusOK) +} + type writeHandler struct { logger log.Logger appendable storage.Appendable @@ -47,7 +109,6 @@ type writeHandler struct { // Experimental feature, new remote write proto format // The handler will accept the new format, but it can still accept the old one - // TODO: this should eventually be via content negotiation? 
rwFormat config.RemoteWriteFormat } @@ -73,29 +134,82 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { var err error - var req *prompb.WriteRequest - var reqMinStr *writev2.WriteRequest - // TODO: this should eventually be done via content negotiation/looking at the header - switch h.rwFormat { - case Version1: - req, err = DecodeWriteRequest(r.Body) - case Version2: - reqMinStr, err = DecodeMinimizedWriteRequestStr(r.Body) + // Set the header(s) in the response based on the rwFormat the server supports + for hName, hValue := range rwHeaderNameValues(h.rwFormat) { + w.Header().Set(hName, hValue) } + // Parse the headers to work out how to handle this + contentEncoding := r.Header.Get("Content-Encoding") + protoVer := r.Header.Get(RemoteWriteVersionHeader) + + switch protoVer { + case "": + // No header provided, assume 0.1.0, as everything later relies on protoVer being set + protoVer = RemoteWriteVersion1HeaderValue + case RemoteWriteVersion1HeaderValue, RemoteWriteVersion20HeaderValue: + // We know this header value; nothing more to do here + default: + // We have a version in the header but it is not one we recognise + // TODO(alexg) - make a proper error for this? + level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", "Unknown remote write version in headers", "ver", protoVer) + // Return a 406 so that the client can choose a more appropriate protocol to use + http.Error(w, "Unknown remote write version in headers", http.StatusNotAcceptable) + return + } + + // Deal with 0.1.0 clients that forget to send Content-Encoding + if protoVer == RemoteWriteVersion1HeaderValue && contentEncoding == "" { + contentEncoding = "snappy" + } + + // Read the request body + body, err := io.ReadAll(r.Body) if err != nil { level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } - // TODO: this should eventually be done detecting the format version above - switch h.rwFormat { - case Version1: - err = h.write(r.Context(), req) - case Version2: - err = h.writeMinStr(r.Context(), reqMinStr) + // Deal with contentEncoding first + var decompressed []byte + + switch contentEncoding { + case "snappy": + decompressed, err = snappy.Decode(nil, body) + if err != nil { + level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + default: + level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", "Unsupported Content-Encoding", "contentEncoding", contentEncoding) + // Return a 406 so that the client can choose a more appropriate protocol to use + http.Error(w, "Unsupported Content-Encoding", http.StatusNotAcceptable) + return + } + + // Now we have a decompressed buffer we can unmarshal it + // The version is valid, so use it to pick which proto message format to unmarshal + switch protoVer { + case RemoteWriteVersion1HeaderValue: + var req prompb.WriteRequest + if err := proto.Unmarshal(decompressed, &req); err != nil { + level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + err = h.write(r.Context(), &req) + case RemoteWriteVersion20HeaderValue: + // 2.0 request + var reqMinStr writev2.WriteRequest + if err := proto.Unmarshal(decompressed, &reqMinStr); err != nil { + level.Error(h.logger).Log("msg", "Error 
decoding remote write request", "err", err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + err = h.writeMinStr(r.Context(), &reqMinStr) } switch { diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index 621f8aa9f..c23011797 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -39,15 +39,94 @@ import ( "github.com/prometheus/prometheus/util/testutil" ) +func TestRemoteWriteHeadHandler(t *testing.T) { + handler := NewWriteHeadHandler(log.NewNopLogger(), nil, Version2) + + req, err := http.NewRequest(http.MethodHead, "", nil) + require.NoError(t, err) + + recorder := httptest.NewRecorder() + handler.ServeHTTP(recorder, req) + + resp := recorder.Result() + require.Equal(t, http.StatusOK, resp.StatusCode) + + // Check header is expected value + protHeader := resp.Header.Get(RemoteWriteVersionHeader) + require.Equal(t, "2.0;snappy,0.1.0", protHeader) +} + +func TestRemoteWriteHandlerMinimizedMissingContentEncoding(t *testing.T) { + // Send a v2 request without a "Content-Encoding:" header -> 406 + buf, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeRequestMinimizedFixture.Timeseries, writeRequestMinimizedFixture.Symbols, nil, nil, nil, "snappy") + require.NoError(t, err) + + req, err := http.NewRequest("", "", bytes.NewReader(buf)) + req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue) + // Do not provide "Content-Encoding: snappy" header + // req.Header.Set("Content-Encoding", "snappy") + require.NoError(t, err) + + appendable := &mockAppendable{} + handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Version2) + + recorder := httptest.NewRecorder() + handler.ServeHTTP(recorder, req) + + resp := recorder.Result() + // Should give us a 406 + require.Equal(t, http.StatusNotAcceptable, resp.StatusCode) +} + +func TestRemoteWriteHandlerInvalidCompression(t *testing.T) { + // Send a v2 request without an unhandled compression scheme -> 406 + buf, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeRequestMinimizedFixture.Timeseries, writeRequestMinimizedFixture.Symbols, nil, nil, nil, "snappy") + require.NoError(t, err) + + req, err := http.NewRequest("", "", bytes.NewReader(buf)) + req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue) + req.Header.Set("Content-Encoding", "zstd") + require.NoError(t, err) + + appendable := &mockAppendable{} + handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Version2) + + recorder := httptest.NewRecorder() + handler.ServeHTTP(recorder, req) + + resp := recorder.Result() + // Expect a 406 + require.Equal(t, http.StatusNotAcceptable, resp.StatusCode) +} + +func TestRemoteWriteHandlerInvalidVersion(t *testing.T) { + // Send a protocol version number that isn't recognised/supported -> 406 + buf, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeRequestMinimizedFixture.Timeseries, writeRequestMinimizedFixture.Symbols, nil, nil, nil, "snappy") + require.NoError(t, err) + + req, err := http.NewRequest("", "", bytes.NewReader(buf)) + req.Header.Set(RemoteWriteVersionHeader, "3.0") + require.NoError(t, err) + + appendable := &mockAppendable{} + handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Version2) + + recorder := httptest.NewRecorder() + handler.ServeHTTP(recorder, req) + + resp := recorder.Result() + // Expect a 406 + require.Equal(t, http.StatusNotAcceptable, resp.StatusCode) +} + func TestRemoteWriteHandler(t *testing.T) { - buf, _, _, err := 
buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil) + buf, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil, "snappy") require.NoError(t, err) req, err := http.NewRequest("", "", bytes.NewReader(buf)) require.NoError(t, err) appendable := &mockAppendable{} - // TODO: test with other proto format(s) handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Version1) recorder := httptest.NewRecorder() @@ -56,6 +135,10 @@ func TestRemoteWriteHandler(t *testing.T) { resp := recorder.Result() require.Equal(t, http.StatusNoContent, resp.StatusCode) + // Check header is expected value + protHeader := resp.Header.Get(RemoteWriteVersionHeader) + require.Equal(t, "0.1.0", protHeader) + i := 0 j := 0 k := 0 @@ -87,16 +170,17 @@ func TestRemoteWriteHandler(t *testing.T) { } func TestRemoteWriteHandlerMinimizedFormat(t *testing.T) { - buf, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeRequestMinimizedFixture.Timeseries, writeRequestMinimizedFixture.Symbols, nil, nil, nil) + buf, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeRequestMinimizedFixture.Timeseries, writeRequestMinimizedFixture.Symbols, nil, nil, nil, "snappy") require.NoError(t, err) req, err := http.NewRequest("", "", bytes.NewReader(buf)) - req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion2HeaderValue) + req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue) + // Must provide "Content-Encoding: snappy" header + req.Header.Set("Content-Encoding", "snappy") require.NoError(t, err) appendable := &mockAppendable{} - // TODO: test with other proto format(s) - handler := NewWriteHandler(nil, nil, appendable, Version2) + handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Version2) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -104,6 +188,10 @@ func TestRemoteWriteHandlerMinimizedFormat(t *testing.T) { resp := recorder.Result() require.Equal(t, http.StatusNoContent, resp.StatusCode) + // Check header is expected value + protHeader := resp.Header.Get(RemoteWriteVersionHeader) + require.Equal(t, "2.0;snappy,0.1.0", protHeader) + i := 0 j := 0 k := 0 @@ -142,7 +230,7 @@ func TestOutOfOrderSample(t *testing.T) { buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{ Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}}, Samples: []prompb.Sample{{Value: 1, Timestamp: 0}}, - }}, nil, nil, nil, nil) + }}, nil, nil, nil, nil, "snappy") require.NoError(t, err) req, err := http.NewRequest("", "", bytes.NewReader(buf)) @@ -168,7 +256,7 @@ func TestOutOfOrderExemplar(t *testing.T) { buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{ Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}}, Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "foo", Value: "bar"}}, Value: 1, Timestamp: 0}}, - }}, nil, nil, nil, nil) + }}, nil, nil, nil, nil, "snappy") require.NoError(t, err) req, err := http.NewRequest("", "", bytes.NewReader(buf)) @@ -192,7 +280,7 @@ func TestOutOfOrderHistogram(t *testing.T) { buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{ Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}}, Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat(nil))}, - }}, nil, nil, nil, nil) + }}, nil, nil, nil, nil, "snappy") require.NoError(t, err) req, err := http.NewRequest("", "", bytes.NewReader(buf)) @@ -222,7 +310,7 @@ func BenchmarkRemoteWritehandler(b 
*testing.B) { {Name: "test_label_name_" + num, Value: labelValue + num}, }, Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram)}, - }}, nil, nil, nil, nil) + }}, nil, nil, nil, nil, "snappy") require.NoError(b, err) req, err := http.NewRequest("", "", bytes.NewReader(buf)) require.NoError(b, err) @@ -241,7 +329,7 @@ func BenchmarkRemoteWritehandler(b *testing.B) { } func TestCommitErr(t *testing.T) { - buf, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil) + buf, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil, "snappy") require.NoError(t, err) req, err := http.NewRequest("", "", bytes.NewReader(buf)) @@ -280,7 +368,7 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) { // TODO: test with other proto format(s) handler := NewWriteHandler(log.NewNopLogger(), nil, db.Head(), Version1) - buf, _, _, err := buildWriteRequest(nil, genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil, nil) + buf, _, _, err := buildWriteRequest(nil, genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil, nil, "snappy") require.NoError(b, err) req, err := http.NewRequest("", "", bytes.NewReader(buf)) @@ -293,7 +381,7 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) { var bufRequests [][]byte for i := 0; i < 100; i++ { - buf, _, _, err = buildWriteRequest(nil, genSeriesWithSample(1000, int64(80+i)*time.Minute.Milliseconds()), nil, nil, nil, nil) + buf, _, _, err = buildWriteRequest(nil, genSeriesWithSample(1000, int64(80+i)*time.Minute.Milliseconds()), nil, nil, nil, nil, "snappy") require.NoError(b, err) bufRequests = append(bufRequests, buf) } diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 4d71ed93d..203ed6d14 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -217,9 +217,10 @@ type API struct { isAgent bool statsRenderer StatsRenderer - remoteWriteHandler http.Handler - remoteReadHandler http.Handler - otlpWriteHandler http.Handler + remoteWriteHeadHandler http.Handler + remoteWriteHandler http.Handler + remoteReadHandler http.Handler + otlpWriteHandler http.Handler codecs []Codec } @@ -296,7 +297,20 @@ func NewAPI( } if rwEnabled { + // TODO(alexg) - Two phase rwFormat rollout needs to create handlers with flag for advertising + // For rollout we do two phases: + // 0. (Before) no flags set + // 1. (During) support new protocols but don't advertise + // + // 2. (After) support new protocols and advertise + // + // For rollback the two phases are: + // 0. (Before) support new protocols and advertise + // 1. (During) support new protocols but don't advertise + // + // 2. 
(After) no flags set a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, rwFormat) + a.remoteWriteHeadHandler = remote.NewWriteHeadHandler(logger, registerer, rwFormat) } if otlpEnabled { a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap) @@ -393,6 +407,7 @@ func (api *API) Register(r *route.Router) { r.Get("/status/walreplay", api.serveWALReplayStatus) r.Post("/read", api.ready(api.remoteRead)) r.Post("/write", api.ready(api.remoteWrite)) + r.Head("/write", api.remoteWriteHead) r.Post("/otlp/v1/metrics", api.ready(api.otlpWrite)) r.Get("/alerts", wrapAgent(api.alerts)) @@ -1616,6 +1631,14 @@ func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) { } } +func (api *API) remoteWriteHead(w http.ResponseWriter, r *http.Request) { + if api.remoteWriteHeadHandler != nil { + api.remoteWriteHeadHandler.ServeHTTP(w, r) + } else { + http.Error(w, "remote write receiver needs to be enabled with --web.enable-remote-write-receiver", http.StatusNotFound) + } +} + func (api *API) remoteWrite(w http.ResponseWriter, r *http.Request) { if api.remoteWriteHandler != nil { api.remoteWriteHandler.ServeHTTP(w, r) diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 5a0f8a7ab..44f180561 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -337,6 +337,46 @@ var sampleFlagMap = map[string]string{ "flag2": "value2", } +func TestHeadEndpoint(t *testing.T) { + for _, tc := range []struct { + name string + rwFormat config.RemoteWriteFormat + expectedStatusCode int + expectedHeaderValue string + }{ + { + name: "HEAD Version 1", + rwFormat: remote.Version1, + expectedStatusCode: http.StatusOK, + expectedHeaderValue: "0.1.0", + }, + { + name: "HEAD Version 2", + rwFormat: remote.Version2, + expectedStatusCode: http.StatusOK, + expectedHeaderValue: "2.0;snappy,0.1.0", + }, + } { + r := route.New() + api := &API{remoteWriteHeadHandler: remote.NewWriteHeadHandler(log.NewNopLogger(), nil, tc.rwFormat), + ready: func(f http.HandlerFunc) http.HandlerFunc { return f }} + api.Register(r) + + s := httptest.NewServer(r) + defer s.Close() + + req, err := http.NewRequest("HEAD", s.URL+"/write", nil) + require.NoError(t, err, "Error creating HEAD request") + client := &http.Client{} + resp, err := client.Do(req) + require.NoError(t, err, "Error executing HEAD request") + require.Equal(t, tc.expectedStatusCode, resp.StatusCode) + + promHeader := resp.Header.Get(remote.RemoteWriteVersionHeader) + require.Equal(t, tc.expectedHeaderValue, promHeader) + } +} + func TestEndpoints(t *testing.T) { storage := promql.LoadedStorage(t, ` load 1m