diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index d55f54df9f..a5a0a176ed 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -154,7 +154,8 @@ type flagConfig struct {
enableNewSDManager bool
enablePerStepStats bool
enableAutoGOMAXPROCS bool
- enableSenderRemoteWrite11 bool
+	// TODO: replace this plain int with the remote.RemoteWriteFormat enum and gate it behind a proper feature flag.
+ rwFormat int
prometheusURL string
corsRegexString string
@@ -211,11 +212,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
continue
case "promql-at-modifier", "promql-negative-offset":
level.Warn(logger).Log("msg", "This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", o)
- case "rw-1-1-sender":
- c.enableSenderRemoteWrite11 = true
- level.Info(logger).Log("msg", "Experimental remote write 1.1 will be used on the sender end, receiver must be able to parse this new protobuf format.")
- case "rw-1-1-receiver":
- c.web.EnableReceiverRemoteWrite11 = true
default:
level.Warn(logger).Log("msg", "Unknown option for --enable-feature", "option", o)
}
@@ -429,6 +425,9 @@ func main() {
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
Default("").StringsVar(&cfg.featureList)
+ a.Flag("remote-write-format", "remote write proto format to use, valid options: 0 (1.0), 1 (reduced format), 3 (min64 format)").
+ Default("0").IntVar(&cfg.rwFormat)
+
promlogflag.AddFlags(a, &cfg.promlogConfig)
a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error {
@@ -601,7 +600,7 @@ func main() {
var (
localStorage = &readyStorage{stats: tsdb.NewDBStats()}
scraper = &readyScrapeManager{}
- remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.enableSenderRemoteWrite11)
+ remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, remote.RemoteWriteFormat(cfg.rwFormat))
fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage)
)
@@ -725,6 +724,7 @@ func main() {
cfg.web.Flags[f.Name] = f.Value.String()
}
+ cfg.web.RemoteWriteFormat = remote.RemoteWriteFormat(cfg.rwFormat)
// Depends on cfg.web.ScrapeManager so needs to be after cfg.web.ScrapeManager = scrapeManager.
webHandler := web.New(log.With(logger, "component", "web"), &cfg.web)
diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md
index 78ec205f24..377a6f77ac 100644
--- a/docs/command-line/prometheus.md
+++ b/docs/command-line/prometheus.md
@@ -53,6 +53,7 @@ The Prometheus monitoring server
| --query.max-concurrency
| Maximum number of queries executed concurrently. Use with server mode only. | `20` |
| --query.max-samples
| Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
| --enable-feature
| Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
+| --remote-write-format
| remote write proto format to use, valid options: 0 (1.0), 1 (reduced format), 3 (min64 format) | `0` |
| --log.level
| Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
| --log.format
| Output format of log messages. One of: [logfmt, json] | `logfmt` |
diff --git a/documentation/examples/remote_storage/example_write_adapter/server.go b/documentation/examples/remote_storage/example_write_adapter/server.go
index 90e9f743b1..7c6511b409 100644
--- a/documentation/examples/remote_storage/example_write_adapter/server.go
+++ b/documentation/examples/remote_storage/example_write_adapter/server.go
@@ -57,38 +57,6 @@ func main() {
}
})
- http.HandleFunc("/receiveReduced", func(w http.ResponseWriter, r *http.Request) {
- req, err := remote.DecodeReducedWriteRequest(r.Body)
- if err != nil {
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
- }
-
- for _, ts := range req.Timeseries {
- m := make(model.Metric, len(ts.Labels))
- for _, l := range ts.Labels {
- m[model.LabelName(req.StringSymbolTable[l.NameRef])] = model.LabelValue(req.StringSymbolTable[l.ValueRef])
- }
-
- for _, s := range ts.Samples {
- fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
- }
-
- for _, e := range ts.Exemplars {
- m := make(model.Metric, len(e.Labels))
- for _, l := range e.Labels {
- m[model.LabelName(req.StringSymbolTable[l.NameRef])] = model.LabelValue(req.StringSymbolTable[l.ValueRef])
- }
- fmt.Printf("\tExemplar: %+v %f %d\n", m, e.Value, e.Timestamp)
- }
-
- for _, hp := range ts.Histograms {
- h := remote.HistogramProtoToHistogram(hp)
- fmt.Printf("\tHistogram: %s\n", h.String())
- }
- }
- })
-
http.HandleFunc("/receiveMinimized", func(w http.ResponseWriter, r *http.Request) {
req, err := remote.DecodeMinimizedWriteRequest(r.Body)
if err != nil {
@@ -97,8 +65,25 @@ func main() {
}
for _, ts := range req.Timeseries {
- ls := remote.Uint32RefToLabels(req.Symbols, ts.LabelSymbols)
- fmt.Println(ls)
+ m := make(model.Metric, len(ts.LabelSymbols)/2)
+ labelIdx := 0
+
+ for labelIdx < len(ts.LabelSymbols) {
+				// TODO: validate offset+length against len(req.Symbols); malformed input can panic the slice below.
+ offset := ts.LabelSymbols[labelIdx]
+ labelIdx++
+ length := ts.LabelSymbols[labelIdx]
+ labelIdx++
+ name := req.Symbols[offset : offset+length]
+				// TODO: validate offset+length against len(req.Symbols); malformed input can panic the slice below.
+ offset = ts.LabelSymbols[labelIdx]
+ labelIdx++
+ length = ts.LabelSymbols[labelIdx]
+ labelIdx++
+ value := req.Symbols[offset : offset+length]
+ m[model.LabelName(name)] = model.LabelValue(value)
+ }
+ fmt.Println(m)
for _, s := range ts.Samples {
fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
diff --git a/prompb/custom.go b/prompb/custom.go
index 7f97b2734b..613338b3eb 100644
--- a/prompb/custom.go
+++ b/prompb/custom.go
@@ -14,8 +14,9 @@
package prompb
import (
- "slices"
"sync"
+
+ "golang.org/x/exp/slices"
)
func (m Sample) T() int64 { return m.Timestamp }
diff --git a/prompb/custom_test.go b/prompb/custom_test.go
index b12f49a338..916dbc7336 100644
--- a/prompb/custom_test.go
+++ b/prompb/custom_test.go
@@ -1,9 +1,21 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package prompb
import (
"testing"
- "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestOptimizedMarshal(t *testing.T) {
@@ -63,15 +75,15 @@ func TestOptimizedMarshal(t *testing.T) {
got = got[:0]
// should be the same as the standard marshal
expected, err := tt.m.Marshal()
- assert.NoError(t, err)
+ require.NoError(t, err)
got, err = tt.m.OptimizedMarshal(got)
- assert.NoError(t, err)
- assert.Equal(t, expected, got)
+ require.NoError(t, err)
+ require.Equal(t, expected, got)
// round trip
m := &MinimizedWriteRequest{}
- assert.NoError(t, m.Unmarshal(got))
- assert.Equal(t, tt.m, m)
+ require.NoError(t, m.Unmarshal(got))
+ require.Equal(t, tt.m, m)
})
}
}
diff --git a/prompb/remote.pb.go b/prompb/remote.pb.go
index c8c24d8bde..2fb98f7737 100644
--- a/prompb/remote.pb.go
+++ b/prompb/remote.pb.go
@@ -60,7 +60,7 @@ func (x ReadRequest_ResponseType) String() string {
}
func (ReadRequest_ResponseType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_eefc82927d57d89b, []int{2, 0}
+ return fileDescriptor_eefc82927d57d89b, []int{3, 0}
}
type WriteRequest struct {
@@ -175,6 +175,63 @@ func (m *MinimizedWriteRequest) GetSymbols() string {
return ""
}
+type MinimizedWriteRequestLen struct {
+ Timeseries []MinimizedTimeSeriesLen `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
+ // The symbols table. All symbols are concatenated strings prepended with a varint of their length.
+ // To read the symbols table, it's required to know the offset of the actual symbol to read from this string.
+ Symbols []byte `protobuf:"bytes,4,opt,name=symbols,proto3" json:"symbols,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MinimizedWriteRequestLen) Reset() { *m = MinimizedWriteRequestLen{} }
+func (m *MinimizedWriteRequestLen) String() string { return proto.CompactTextString(m) }
+func (*MinimizedWriteRequestLen) ProtoMessage() {}
+func (*MinimizedWriteRequestLen) Descriptor() ([]byte, []int) {
+ return fileDescriptor_eefc82927d57d89b, []int{2}
+}
+func (m *MinimizedWriteRequestLen) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MinimizedWriteRequestLen) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MinimizedWriteRequestLen.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MinimizedWriteRequestLen) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MinimizedWriteRequestLen.Merge(m, src)
+}
+func (m *MinimizedWriteRequestLen) XXX_Size() int {
+ return m.Size()
+}
+func (m *MinimizedWriteRequestLen) XXX_DiscardUnknown() {
+ xxx_messageInfo_MinimizedWriteRequestLen.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MinimizedWriteRequestLen proto.InternalMessageInfo
+
+func (m *MinimizedWriteRequestLen) GetTimeseries() []MinimizedTimeSeriesLen {
+ if m != nil {
+ return m.Timeseries
+ }
+ return nil
+}
+
+func (m *MinimizedWriteRequestLen) GetSymbols() []byte {
+ if m != nil {
+ return m.Symbols
+ }
+ return nil
+}
+
// ReadRequest represents a remote read request.
type ReadRequest struct {
Queries []*Query `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"`
@@ -193,7 +250,7 @@ func (m *ReadRequest) Reset() { *m = ReadRequest{} }
func (m *ReadRequest) String() string { return proto.CompactTextString(m) }
func (*ReadRequest) ProtoMessage() {}
func (*ReadRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_eefc82927d57d89b, []int{2}
+ return fileDescriptor_eefc82927d57d89b, []int{3}
}
func (m *ReadRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -249,7 +306,7 @@ func (m *ReadResponse) Reset() { *m = ReadResponse{} }
func (m *ReadResponse) String() string { return proto.CompactTextString(m) }
func (*ReadResponse) ProtoMessage() {}
func (*ReadResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_eefc82927d57d89b, []int{3}
+ return fileDescriptor_eefc82927d57d89b, []int{4}
}
func (m *ReadResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -299,7 +356,7 @@ func (m *Query) Reset() { *m = Query{} }
func (m *Query) String() string { return proto.CompactTextString(m) }
func (*Query) ProtoMessage() {}
func (*Query) Descriptor() ([]byte, []int) {
- return fileDescriptor_eefc82927d57d89b, []int{4}
+ return fileDescriptor_eefc82927d57d89b, []int{5}
}
func (m *Query) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -368,7 +425,7 @@ func (m *QueryResult) Reset() { *m = QueryResult{} }
func (m *QueryResult) String() string { return proto.CompactTextString(m) }
func (*QueryResult) ProtoMessage() {}
func (*QueryResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_eefc82927d57d89b, []int{5}
+ return fileDescriptor_eefc82927d57d89b, []int{6}
}
func (m *QueryResult) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -421,7 +478,7 @@ func (m *ChunkedReadResponse) Reset() { *m = ChunkedReadResponse{} }
func (m *ChunkedReadResponse) String() string { return proto.CompactTextString(m) }
func (*ChunkedReadResponse) ProtoMessage() {}
func (*ChunkedReadResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_eefc82927d57d89b, []int{6}
+ return fileDescriptor_eefc82927d57d89b, []int{7}
}
func (m *ChunkedReadResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -468,6 +525,7 @@ func init() {
proto.RegisterEnum("prometheus.ReadRequest_ResponseType", ReadRequest_ResponseType_name, ReadRequest_ResponseType_value)
proto.RegisterType((*WriteRequest)(nil), "prometheus.WriteRequest")
proto.RegisterType((*MinimizedWriteRequest)(nil), "prometheus.MinimizedWriteRequest")
+ proto.RegisterType((*MinimizedWriteRequestLen)(nil), "prometheus.MinimizedWriteRequestLen")
proto.RegisterType((*ReadRequest)(nil), "prometheus.ReadRequest")
proto.RegisterType((*ReadResponse)(nil), "prometheus.ReadResponse")
proto.RegisterType((*Query)(nil), "prometheus.Query")
@@ -478,41 +536,43 @@ func init() {
func init() { proto.RegisterFile("remote.proto", fileDescriptor_eefc82927d57d89b) }
var fileDescriptor_eefc82927d57d89b = []byte{
- // 543 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x40,
- 0x10, 0xae, 0xeb, 0xb4, 0x09, 0xe3, 0x10, 0x99, 0x6d, 0x43, 0x4c, 0x0e, 0x49, 0x64, 0x71, 0x88,
- 0x54, 0x14, 0x44, 0xa8, 0x38, 0xf5, 0x40, 0x5a, 0x22, 0x95, 0x52, 0xf3, 0xb3, 0x09, 0x02, 0x21,
- 0x24, 0xcb, 0xb1, 0x47, 0x8d, 0x45, 0xfc, 0x53, 0xef, 0x5a, 0x6a, 0x38, 0xf3, 0x00, 0x3c, 0x13,
- 0xa7, 0x9e, 0x10, 0x4f, 0x80, 0x50, 0x9e, 0x04, 0x79, 0x6d, 0x87, 0x0d, 0x20, 0xc4, 0xcd, 0xfb,
- 0xfd, 0xcd, 0xec, 0xec, 0x18, 0xea, 0x09, 0x06, 0x11, 0xc7, 0x41, 0x9c, 0x44, 0x3c, 0x22, 0x10,
- 0x27, 0x51, 0x80, 0x7c, 0x8e, 0x29, 0x6b, 0x6b, 0x7c, 0x19, 0x23, 0xcb, 0x89, 0xf6, 0xfe, 0x45,
- 0x74, 0x11, 0x89, 0xcf, 0xfb, 0xd9, 0x57, 0x8e, 0x9a, 0x9f, 0x15, 0xa8, 0xbf, 0x49, 0x7c, 0x8e,
- 0x14, 0x2f, 0x53, 0x64, 0x9c, 0x1c, 0x01, 0x70, 0x3f, 0x40, 0x86, 0x89, 0x8f, 0xcc, 0x50, 0x7a,
- 0x6a, 0x5f, 0x1b, 0xde, 0x1e, 0xfc, 0x0a, 0x1d, 0x4c, 0xfd, 0x00, 0x27, 0x82, 0x3d, 0xae, 0x5c,
- 0x7f, 0xef, 0x6e, 0x51, 0x49, 0x4f, 0x8e, 0xa0, 0x16, 0x20, 0x77, 0x3c, 0x87, 0x3b, 0x86, 0x2a,
- 0xbc, 0x6d, 0xd9, 0x6b, 0x21, 0x4f, 0x7c, 0xd7, 0x2a, 0x14, 0x85, 0x7f, 0xed, 0x38, 0xab, 0xd4,
- 0xb6, 0x75, 0xd5, 0xfc, 0xa4, 0x40, 0xd3, 0xf2, 0x43, 0x3f, 0xf0, 0x3f, 0xa2, 0xb7, 0xd1, 0xdb,
- 0xf8, 0x2f, 0xbd, 0x75, 0x37, 0xf2, 0x4b, 0xdb, 0x3f, 0x9b, 0x34, 0xa0, 0xca, 0x96, 0xc1, 0x2c,
- 0x5a, 0x30, 0xa3, 0xd2, 0x53, 0xfa, 0x37, 0x68, 0x79, 0xcc, 0x1b, 0x38, 0xab, 0xd4, 0x54, 0xbd,
- 0x62, 0x7e, 0x55, 0x40, 0xa3, 0xe8, 0x78, 0x65, 0xf1, 0x03, 0xa8, 0x5e, 0xa6, 0x72, 0xe5, 0x5b,
- 0x72, 0xe5, 0x57, 0x29, 0x26, 0x4b, 0x5a, 0x2a, 0xc8, 0x7b, 0x68, 0x39, 0xae, 0x8b, 0x31, 0x47,
- 0xcf, 0x4e, 0x90, 0xc5, 0x51, 0xc8, 0xd0, 0x16, 0xaf, 0x61, 0x6c, 0xf7, 0xd4, 0x7e, 0x63, 0x78,
- 0x57, 0x36, 0x4b, 0x65, 0x06, 0xb4, 0x50, 0x4f, 0x97, 0x31, 0xd2, 0x66, 0x19, 0x22, 0xa3, 0xcc,
- 0x3c, 0x84, 0xba, 0x0c, 0x10, 0x0d, 0xaa, 0x93, 0x91, 0xf5, 0xf2, 0x7c, 0x3c, 0xd1, 0xb7, 0x48,
- 0x0b, 0xf6, 0x26, 0x53, 0x3a, 0x1e, 0x59, 0xe3, 0x27, 0xf6, 0xdb, 0x17, 0xd4, 0x3e, 0x39, 0x7d,
- 0xfd, 0xfc, 0xd9, 0x44, 0x57, 0xcc, 0x51, 0xe6, 0x72, 0xd6, 0x51, 0xe4, 0x01, 0x54, 0x13, 0x64,
- 0xe9, 0x82, 0x97, 0x17, 0x6a, 0xfd, 0x79, 0x21, 0xc1, 0xd3, 0x52, 0x67, 0x7e, 0x51, 0x60, 0x47,
- 0x10, 0xe4, 0x1e, 0x10, 0xc6, 0x9d, 0x84, 0xdb, 0x62, 0xae, 0xdc, 0x09, 0x62, 0x3b, 0xc8, 0x72,
- 0x94, 0xbe, 0x4a, 0x75, 0xc1, 0x4c, 0x4b, 0xc2, 0x62, 0xa4, 0x0f, 0x3a, 0x86, 0xde, 0xa6, 0x76,
- 0x5b, 0x68, 0x1b, 0x18, 0x7a, 0xb2, 0xf2, 0x10, 0x6a, 0x81, 0xc3, 0xdd, 0x39, 0x26, 0xac, 0x58,
- 0x20, 0x43, 0xee, 0xea, 0xdc, 0x99, 0xe1, 0xc2, 0xca, 0x05, 0x74, 0xad, 0x24, 0x07, 0xb0, 0x33,
- 0xf7, 0x43, 0x9e, 0xbf, 0xa7, 0x36, 0x6c, 0xfe, 0x3e, 0xdc, 0xd3, 0x8c, 0xa4, 0xb9, 0xc6, 0x1c,
- 0x83, 0x26, 0x5d, 0x8e, 0x3c, 0xfa, 0xff, 0x85, 0x97, 0xb7, 0xc8, 0xbc, 0x82, 0xbd, 0x93, 0x79,
- 0x1a, 0x7e, 0xc8, 0x1e, 0x47, 0x9a, 0xea, 0x63, 0x68, 0xb8, 0x39, 0x6c, 0x6f, 0x44, 0xde, 0x91,
- 0x23, 0x0b, 0x63, 0x91, 0x7a, 0xd3, 0x95, 0x8f, 0xa4, 0x0b, 0x5a, 0xb6, 0x46, 0x4b, 0xdb, 0x0f,
- 0x3d, 0xbc, 0x2a, 0xe6, 0x04, 0x02, 0x7a, 0x9a, 0x21, 0xc7, 0xfb, 0xd7, 0xab, 0x8e, 0xf2, 0x6d,
- 0xd5, 0x51, 0x7e, 0xac, 0x3a, 0xca, 0xbb, 0xdd, 0x2c, 0x37, 0x9e, 0xcd, 0x76, 0xc5, 0x0f, 0xfd,
- 0xf0, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x3e, 0xdc, 0x81, 0x0f, 0x04, 0x00, 0x00,
+ // 568 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x4b, 0x6f, 0xd3, 0x40,
+ 0x10, 0xee, 0xd6, 0x69, 0x13, 0xc6, 0xa1, 0x32, 0xdb, 0x96, 0x9a, 0x1e, 0x9a, 0xc8, 0xe2, 0x10,
+ 0xa9, 0x28, 0x88, 0x50, 0x71, 0xea, 0x81, 0xb4, 0x44, 0x0a, 0x25, 0xe6, 0xb1, 0x09, 0x02, 0x21,
+ 0x24, 0xcb, 0xb1, 0x47, 0x8d, 0x45, 0xfc, 0xa8, 0x77, 0x2d, 0x35, 0x9c, 0x39, 0x71, 0xe2, 0x37,
+ 0x71, 0xea, 0x09, 0xf1, 0x0b, 0x10, 0xca, 0x2f, 0x41, 0x7e, 0x85, 0x0d, 0x44, 0x94, 0xdb, 0xee,
+ 0x7c, 0x8f, 0xfd, 0x76, 0x76, 0x6c, 0xa8, 0xc7, 0xe8, 0x87, 0x02, 0xdb, 0x51, 0x1c, 0x8a, 0x90,
+ 0x42, 0x14, 0x87, 0x3e, 0x8a, 0x09, 0x26, 0x7c, 0x5f, 0x15, 0xb3, 0x08, 0x79, 0x0e, 0xec, 0xef,
+ 0x9c, 0x87, 0xe7, 0x61, 0xb6, 0xbc, 0x9f, 0xae, 0xf2, 0xaa, 0xf1, 0x85, 0x40, 0xfd, 0x4d, 0xec,
+ 0x09, 0x64, 0x78, 0x91, 0x20, 0x17, 0xf4, 0x18, 0x40, 0x78, 0x3e, 0x72, 0x8c, 0x3d, 0xe4, 0x3a,
+ 0x69, 0x2a, 0x2d, 0xb5, 0x73, 0xbb, 0xfd, 0xdb, 0xb4, 0x3d, 0xf2, 0x7c, 0x1c, 0x66, 0xe8, 0x49,
+ 0xe5, 0xea, 0x47, 0x63, 0x8d, 0x49, 0x7c, 0x7a, 0x0c, 0x35, 0x1f, 0x85, 0xed, 0xda, 0xc2, 0xd6,
+ 0x95, 0x4c, 0xbb, 0x2f, 0x6b, 0x4d, 0x14, 0xb1, 0xe7, 0x98, 0x05, 0xa3, 0xd0, 0x2f, 0x14, 0x67,
+ 0x95, 0xda, 0xba, 0xa6, 0x18, 0x9f, 0x08, 0xec, 0x9a, 0x5e, 0xe0, 0xf9, 0xde, 0x47, 0x74, 0x97,
+ 0xb2, 0xf5, 0x56, 0x64, 0x6b, 0x2c, 0xf9, 0x97, 0xb2, 0x7f, 0x86, 0xd4, 0xa1, 0xca, 0x67, 0xfe,
+ 0x38, 0x9c, 0x72, 0xbd, 0xd2, 0x24, 0xad, 0x1b, 0xac, 0xdc, 0xe6, 0x01, 0xce, 0x2a, 0x35, 0x45,
+ 0xab, 0x18, 0x9f, 0x09, 0xe8, 0x2b, 0x63, 0x0c, 0x30, 0xa0, 0xfd, 0x15, 0x49, 0x8c, 0x6b, 0x92,
+ 0x0c, 0x30, 0xb8, 0x3e, 0x4c, 0x7d, 0x75, 0x98, 0x6f, 0x04, 0x54, 0x86, 0xb6, 0x5b, 0x76, 0xe2,
+ 0x10, 0xaa, 0x17, 0x89, 0x7c, 0xf8, 0x2d, 0xf9, 0xf0, 0x57, 0x09, 0xc6, 0x33, 0x56, 0x32, 0xe8,
+ 0x7b, 0xd8, 0xb3, 0x1d, 0x07, 0x23, 0x81, 0xae, 0x15, 0x23, 0x8f, 0xc2, 0x80, 0xa3, 0x95, 0x8d,
+ 0x86, 0xbe, 0xde, 0x54, 0x5a, 0x5b, 0x9d, 0xbb, 0xb2, 0x58, 0x3a, 0xa6, 0xcd, 0x0a, 0xf6, 0x68,
+ 0x16, 0x21, 0xdb, 0x2d, 0x4d, 0xe4, 0x2a, 0x37, 0x8e, 0xa0, 0x2e, 0x17, 0xa8, 0x0a, 0xd5, 0x61,
+ 0xd7, 0x7c, 0x39, 0xe8, 0x0d, 0xb5, 0x35, 0xba, 0x07, 0xdb, 0xc3, 0x11, 0xeb, 0x75, 0xcd, 0xde,
+ 0x13, 0xeb, 0xed, 0x0b, 0x66, 0x9d, 0xf6, 0x5f, 0x3f, 0x7f, 0x36, 0xd4, 0x88, 0xd1, 0x4d, 0x55,
+ 0xf6, 0xc2, 0x8a, 0x3e, 0x80, 0x6a, 0x8c, 0x3c, 0x99, 0x8a, 0xf2, 0x42, 0x7b, 0x7f, 0x5f, 0x28,
+ 0xc3, 0x59, 0xc9, 0x33, 0xbe, 0x12, 0xd8, 0xc8, 0x00, 0x7a, 0x0f, 0x28, 0x17, 0x76, 0x2c, 0xac,
+ 0xac, 0xaf, 0xc2, 0xf6, 0x23, 0xcb, 0x4f, 0x7d, 0x48, 0x4b, 0x61, 0x5a, 0x86, 0x8c, 0x4a, 0xc0,
+ 0xe4, 0xb4, 0x05, 0x1a, 0x06, 0xee, 0x32, 0x77, 0x3d, 0xe3, 0x6e, 0x61, 0xe0, 0xca, 0xcc, 0x23,
+ 0xa8, 0xf9, 0xb6, 0x70, 0x26, 0x18, 0xf3, 0x62, 0x9a, 0x75, 0x39, 0xd5, 0xc0, 0x1e, 0xe3, 0xd4,
+ 0xcc, 0x09, 0x6c, 0xc1, 0xa4, 0x87, 0xb0, 0x31, 0xf1, 0x02, 0x91, 0xbf, 0xa7, 0xda, 0xd9, 0xfd,
+ 0xb3, 0xb9, 0xfd, 0x14, 0x64, 0x39, 0xc7, 0xe8, 0x81, 0x2a, 0x5d, 0x8e, 0x3e, 0xfa, 0xff, 0xaf,
+ 0x4f, 0x9e, 0x22, 0xe3, 0x12, 0xb6, 0x4f, 0x27, 0x49, 0xf0, 0x21, 0x7d, 0x1c, 0xa9, 0xab, 0x8f,
+ 0x61, 0xcb, 0xc9, 0xcb, 0xd6, 0x92, 0xe5, 0x1d, 0xd9, 0xb2, 0x10, 0x16, 0xae, 0x37, 0x1d, 0x79,
+ 0x4b, 0x1b, 0xa0, 0xa6, 0x63, 0x34, 0xb3, 0xbc, 0xc0, 0xc5, 0xcb, 0xa2, 0x4f, 0x90, 0x95, 0x9e,
+ 0xa6, 0x95, 0x93, 0x9d, 0xab, 0xf9, 0x01, 0xf9, 0x3e, 0x3f, 0x20, 0x3f, 0xe7, 0x07, 0xe4, 0xdd,
+ 0x66, 0xea, 0x1b, 0x8d, 0xc7, 0x9b, 0xd9, 0xdf, 0xe5, 0xe1, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff,
+ 0xf1, 0x65, 0x72, 0x0c, 0x9c, 0x04, 0x00, 0x00,
}
func (m *WriteRequest) Marshal() (dAtA []byte, err error) {
@@ -618,6 +678,54 @@ func (m *MinimizedWriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *MinimizedWriteRequestLen) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MinimizedWriteRequestLen) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MinimizedWriteRequestLen) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Symbols) > 0 {
+ i -= len(m.Symbols)
+ copy(dAtA[i:], m.Symbols)
+ i = encodeVarintRemote(dAtA, i, uint64(len(m.Symbols)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Timeseries) > 0 {
+ for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRemote(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *ReadRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -925,6 +1033,28 @@ func (m *MinimizedWriteRequest) Size() (n int) {
return n
}
+func (m *MinimizedWriteRequestLen) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Timeseries) > 0 {
+ for _, e := range m.Timeseries {
+ l = e.Size()
+ n += 1 + l + sovRemote(uint64(l))
+ }
+ }
+ l = len(m.Symbols)
+ if l > 0 {
+ n += 1 + l + sovRemote(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
func (m *ReadRequest) Size() (n int) {
if m == nil {
return 0
@@ -1277,6 +1407,125 @@ func (m *MinimizedWriteRequest) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *MinimizedWriteRequestLen) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRemote
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MinimizedWriteRequestLen: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MinimizedWriteRequestLen: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRemote
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRemote
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRemote
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Timeseries = append(m.Timeseries, MinimizedTimeSeriesLen{})
+ if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Symbols", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRemote
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRemote
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRemote
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Symbols = append(m.Symbols[:0], dAtA[iNdEx:postIndex]...)
+ if m.Symbols == nil {
+ m.Symbols = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRemote(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRemote
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *ReadRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
diff --git a/prompb/remote.proto b/prompb/remote.proto
index ac6a74be20..7b3747e336 100644
--- a/prompb/remote.proto
+++ b/prompb/remote.proto
@@ -27,7 +27,6 @@ message WriteRequest {
repeated prometheus.MetricMetadata metadata = 3 [(gogoproto.nullable) = false];
}
-
message MinimizedWriteRequest {
repeated MinimizedTimeSeries timeseries = 1 [(gogoproto.nullable) = false];
// Cortex uses this field to determine the source of the write request.
@@ -40,6 +39,18 @@ message MinimizedWriteRequest {
string symbols = 4;
}
+message MinimizedWriteRequestLen {
+ repeated MinimizedTimeSeriesLen timeseries = 1 [(gogoproto.nullable) = false];
+ // Cortex uses this field to determine the source of the write request.
+ // We reserve it to avoid any compatibility issues.
+ reserved 2;
+ // Metadata (3) has moved to be part of the TimeSeries type
+ reserved 3;
+ // The symbols table. All symbols are concatenated strings prepended with a varint of their length.
+ // To read the symbols table, it's required to know the offset of the actual symbol to read from this string.
+ bytes symbols = 4;
+}
+
// ReadRequest represents a remote read request.
message ReadRequest {
repeated Query queries = 1;
diff --git a/prompb/types.pb.go b/prompb/types.pb.go
index b19fb1621c..f1e220ce7e 100644
--- a/prompb/types.pb.go
+++ b/prompb/types.pb.go
@@ -127,7 +127,7 @@ func (x LabelMatcher_Type) String() string {
}
func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_d938547f84707355, []int{9, 0}
+ return fileDescriptor_d938547f84707355, []int{10, 0}
}
// We require this to match chunkenc.Encoding.
@@ -159,7 +159,7 @@ func (x Chunk_Encoding) String() string {
}
func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_d938547f84707355, []int{11, 0}
+ return fileDescriptor_d938547f84707355, []int{12, 0}
}
type MetricMetadata struct {
@@ -736,6 +736,7 @@ func (m *TimeSeries) GetHistograms() []Histogram {
type MinimizedTimeSeries struct {
// Sorted list of label name-value pair references. This list's len is always multiple of 4,
// packing tuples of (label name offset, label name length, label value offset, label value length).
+	// Offsets point to the symbol table in the higher level MinimizedWriteRequest.
LabelSymbols []uint32 `protobuf:"varint,1,rep,packed,name=label_symbols,json=labelSymbols,proto3" json:"label_symbols,omitempty"`
// Sorted by time, oldest sample first.
// TODO: support references for other types
@@ -808,6 +809,82 @@ func (m *MinimizedTimeSeries) GetHistograms() []Histogram {
return nil
}
+type MinimizedTimeSeriesLen struct {
+ // Sorted list of label name-value pair references, encoded as 32bit uint. This
+ // list's real len is always multiple of 2, label name offset/label value offset.
+ // Offsets point to the symbol table in the higher level MinimizedWriteRequestLen.
+ LabelSymbols []uint32 `protobuf:"fixed32,1,rep,packed,name=label_symbols,json=labelSymbols,proto3" json:"label_symbols,omitempty"`
+ // Sorted by time, oldest sample first.
+ // TODO: support references for other types
+ Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"`
+ Exemplars []Exemplar `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars"`
+ Histograms []Histogram `protobuf:"bytes,4,rep,name=histograms,proto3" json:"histograms"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MinimizedTimeSeriesLen) Reset() { *m = MinimizedTimeSeriesLen{} }
+func (m *MinimizedTimeSeriesLen) String() string { return proto.CompactTextString(m) }
+func (*MinimizedTimeSeriesLen) ProtoMessage() {}
+func (*MinimizedTimeSeriesLen) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d938547f84707355, []int{7}
+}
+func (m *MinimizedTimeSeriesLen) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MinimizedTimeSeriesLen) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MinimizedTimeSeriesLen.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MinimizedTimeSeriesLen) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MinimizedTimeSeriesLen.Merge(m, src)
+}
+func (m *MinimizedTimeSeriesLen) XXX_Size() int {
+ return m.Size()
+}
+func (m *MinimizedTimeSeriesLen) XXX_DiscardUnknown() {
+ xxx_messageInfo_MinimizedTimeSeriesLen.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MinimizedTimeSeriesLen proto.InternalMessageInfo
+
+func (m *MinimizedTimeSeriesLen) GetLabelSymbols() []uint32 {
+ if m != nil {
+ return m.LabelSymbols
+ }
+ return nil
+}
+
+func (m *MinimizedTimeSeriesLen) GetSamples() []Sample {
+ if m != nil {
+ return m.Samples
+ }
+ return nil
+}
+
+func (m *MinimizedTimeSeriesLen) GetExemplars() []Exemplar {
+ if m != nil {
+ return m.Exemplars
+ }
+ return nil
+}
+
+func (m *MinimizedTimeSeriesLen) GetHistograms() []Histogram {
+ if m != nil {
+ return m.Histograms
+ }
+ return nil
+}
+
type Label struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
@@ -820,7 +897,7 @@ func (m *Label) Reset() { *m = Label{} }
func (m *Label) String() string { return proto.CompactTextString(m) }
func (*Label) ProtoMessage() {}
func (*Label) Descriptor() ([]byte, []int) {
- return fileDescriptor_d938547f84707355, []int{7}
+ return fileDescriptor_d938547f84707355, []int{8}
}
func (m *Label) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -874,7 +951,7 @@ func (m *Labels) Reset() { *m = Labels{} }
func (m *Labels) String() string { return proto.CompactTextString(m) }
func (*Labels) ProtoMessage() {}
func (*Labels) Descriptor() ([]byte, []int) {
- return fileDescriptor_d938547f84707355, []int{8}
+ return fileDescriptor_d938547f84707355, []int{9}
}
func (m *Labels) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -924,7 +1001,7 @@ func (m *LabelMatcher) Reset() { *m = LabelMatcher{} }
func (m *LabelMatcher) String() string { return proto.CompactTextString(m) }
func (*LabelMatcher) ProtoMessage() {}
func (*LabelMatcher) Descriptor() ([]byte, []int) {
- return fileDescriptor_d938547f84707355, []int{9}
+ return fileDescriptor_d938547f84707355, []int{10}
}
func (m *LabelMatcher) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -991,7 +1068,7 @@ func (m *ReadHints) Reset() { *m = ReadHints{} }
func (m *ReadHints) String() string { return proto.CompactTextString(m) }
func (*ReadHints) ProtoMessage() {}
func (*ReadHints) Descriptor() ([]byte, []int) {
- return fileDescriptor_d938547f84707355, []int{10}
+ return fileDescriptor_d938547f84707355, []int{11}
}
func (m *ReadHints) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1085,7 +1162,7 @@ func (m *Chunk) Reset() { *m = Chunk{} }
func (m *Chunk) String() string { return proto.CompactTextString(m) }
func (*Chunk) ProtoMessage() {}
func (*Chunk) Descriptor() ([]byte, []int) {
- return fileDescriptor_d938547f84707355, []int{11}
+ return fileDescriptor_d938547f84707355, []int{12}
}
func (m *Chunk) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1157,7 +1234,7 @@ func (m *ChunkedSeries) Reset() { *m = ChunkedSeries{} }
func (m *ChunkedSeries) String() string { return proto.CompactTextString(m) }
func (*ChunkedSeries) ProtoMessage() {}
func (*ChunkedSeries) Descriptor() ([]byte, []int) {
- return fileDescriptor_d938547f84707355, []int{12}
+ return fileDescriptor_d938547f84707355, []int{13}
}
func (m *ChunkedSeries) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1212,6 +1289,7 @@ func init() {
proto.RegisterType((*BucketSpan)(nil), "prometheus.BucketSpan")
proto.RegisterType((*TimeSeries)(nil), "prometheus.TimeSeries")
proto.RegisterType((*MinimizedTimeSeries)(nil), "prometheus.MinimizedTimeSeries")
+ proto.RegisterType((*MinimizedTimeSeriesLen)(nil), "prometheus.MinimizedTimeSeriesLen")
proto.RegisterType((*Label)(nil), "prometheus.Label")
proto.RegisterType((*Labels)(nil), "prometheus.Labels")
proto.RegisterType((*LabelMatcher)(nil), "prometheus.LabelMatcher")
@@ -1223,78 +1301,79 @@ func init() {
func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) }
var fileDescriptor_d938547f84707355 = []byte{
- // 1129 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x56, 0xdd, 0x8e, 0xdb, 0x44,
- 0x14, 0x5e, 0xdb, 0x89, 0x13, 0x9f, 0xfc, 0xd4, 0x3b, 0xdd, 0x16, 0x53, 0xd1, 0x6d, 0xb0, 0x54,
- 0x08, 0x08, 0x65, 0xd5, 0x85, 0x0b, 0x2a, 0x2a, 0xa4, 0xdd, 0x6d, 0xf6, 0x47, 0xac, 0x13, 0x75,
- 0x92, 0x15, 0x94, 0x1b, 0x6b, 0x92, 0xcc, 0x26, 0x56, 0xfd, 0x87, 0x67, 0x52, 0x6d, 0xfa, 0x1e,
- 0xdc, 0xf1, 0x12, 0xbc, 0x45, 0x25, 0x6e, 0xe0, 0x05, 0x10, 0xda, 0x2b, 0x24, 0x5e, 0x02, 0xcd,
- 0xd8, 0x8e, 0x9d, 0x6e, 0x91, 0x28, 0x77, 0xdc, 0xcd, 0xf9, 0xce, 0xdf, 0x37, 0x73, 0xe6, 0xcc,
- 0x19, 0x68, 0xf0, 0x55, 0x4c, 0x59, 0x2f, 0x4e, 0x22, 0x1e, 0x21, 0x88, 0x93, 0x28, 0xa0, 0x7c,
- 0x41, 0x97, 0xec, 0xde, 0xce, 0x3c, 0x9a, 0x47, 0x12, 0xde, 0x13, 0xab, 0xd4, 0xc2, 0xfe, 0x49,
- 0x85, 0xb6, 0x43, 0x79, 0xe2, 0x4d, 0x1d, 0xca, 0xc9, 0x8c, 0x70, 0x82, 0x1e, 0x43, 0x45, 0xc4,
- 0xb0, 0x94, 0x8e, 0xd2, 0x6d, 0xef, 0x3f, 0xec, 0x15, 0x31, 0x7a, 0x9b, 0x96, 0x99, 0x38, 0x5e,
- 0xc5, 0x14, 0x4b, 0x17, 0xf4, 0x19, 0xa0, 0x40, 0x62, 0xee, 0x25, 0x09, 0x3c, 0x7f, 0xe5, 0x86,
- 0x24, 0xa0, 0x96, 0xda, 0x51, 0xba, 0x06, 0x36, 0x53, 0xcd, 0xb1, 0x54, 0x0c, 0x48, 0x40, 0x11,
- 0x82, 0xca, 0x82, 0xfa, 0xb1, 0x55, 0x91, 0x7a, 0xb9, 0x16, 0xd8, 0x32, 0xf4, 0xb8, 0x55, 0x4d,
- 0x31, 0xb1, 0xb6, 0x57, 0x00, 0x45, 0x26, 0xd4, 0x80, 0xda, 0xc5, 0xe0, 0x9b, 0xc1, 0xf0, 0xdb,
- 0x81, 0xb9, 0x25, 0x84, 0xa3, 0xe1, 0xc5, 0x60, 0xdc, 0xc7, 0xa6, 0x82, 0x0c, 0xa8, 0x9e, 0x1c,
- 0x5c, 0x9c, 0xf4, 0x4d, 0x15, 0xb5, 0xc0, 0x38, 0x3d, 0x1b, 0x8d, 0x87, 0x27, 0xf8, 0xc0, 0x31,
- 0x35, 0x84, 0xa0, 0x2d, 0x35, 0x05, 0x56, 0x11, 0xae, 0xa3, 0x0b, 0xc7, 0x39, 0xc0, 0xcf, 0xcd,
- 0x2a, 0xaa, 0x43, 0xe5, 0x6c, 0x70, 0x3c, 0x34, 0x75, 0xd4, 0x84, 0xfa, 0x68, 0x7c, 0x30, 0xee,
- 0x8f, 0xfa, 0x63, 0xb3, 0x66, 0x3f, 0x01, 0x7d, 0x44, 0x82, 0xd8, 0xa7, 0x68, 0x07, 0xaa, 0x2f,
- 0x89, 0xbf, 0x4c, 0x8f, 0x45, 0xc1, 0xa9, 0x80, 0x3e, 0x00, 0x83, 0x7b, 0x01, 0x65, 0x9c, 0x04,
- 0xb1, 0xdc, 0xa7, 0x86, 0x0b, 0xc0, 0x8e, 0xa0, 0xde, 0xbf, 0xa2, 0x41, 0xec, 0x93, 0x04, 0xed,
- 0x81, 0xee, 0x93, 0x09, 0xf5, 0x99, 0xa5, 0x74, 0xb4, 0x6e, 0x63, 0x7f, 0xbb, 0x7c, 0xae, 0xe7,
- 0x42, 0x73, 0x58, 0x79, 0xfd, 0xfb, 0x83, 0x2d, 0x9c, 0x99, 0x15, 0x09, 0xd5, 0x7f, 0x4c, 0xa8,
- 0xbd, 0x99, 0xf0, 0x97, 0x2a, 0x18, 0xa7, 0x1e, 0xe3, 0xd1, 0x3c, 0x21, 0x01, 0xba, 0x0f, 0xc6,
- 0x34, 0x5a, 0x86, 0xdc, 0xf5, 0x42, 0x2e, 0x69, 0x57, 0x4e, 0xb7, 0x70, 0x5d, 0x42, 0x67, 0x21,
- 0x47, 0x1f, 0x42, 0x23, 0x55, 0x5f, 0xfa, 0x11, 0xe1, 0x69, 0x9a, 0xd3, 0x2d, 0x0c, 0x12, 0x3c,
- 0x16, 0x18, 0x32, 0x41, 0x63, 0xcb, 0x40, 0xe6, 0x51, 0xb0, 0x58, 0xa2, 0xbb, 0xa0, 0xb3, 0xe9,
- 0x82, 0x06, 0x44, 0x56, 0x6d, 0x1b, 0x67, 0x12, 0x7a, 0x08, 0xed, 0x57, 0x34, 0x89, 0x5c, 0xbe,
- 0x48, 0x28, 0x5b, 0x44, 0xfe, 0x4c, 0x56, 0x50, 0xc1, 0x2d, 0x81, 0x8e, 0x73, 0x10, 0x7d, 0x94,
- 0x99, 0x15, 0xbc, 0x74, 0xc9, 0x4b, 0xc1, 0x4d, 0x81, 0x1f, 0xe5, 0xdc, 0x3e, 0x05, 0xb3, 0x64,
- 0x97, 0x12, 0xac, 0x49, 0x82, 0x0a, 0x6e, 0xaf, 0x2d, 0x53, 0x92, 0x47, 0xd0, 0x0e, 0xe9, 0x9c,
- 0x70, 0xef, 0x25, 0x75, 0x59, 0x4c, 0x42, 0x66, 0xd5, 0xe5, 0x09, 0xdf, 0x2d, 0x9f, 0xf0, 0xe1,
- 0x72, 0xfa, 0x82, 0xf2, 0x51, 0x4c, 0xc2, 0xec, 0x98, 0x5b, 0xb9, 0x8f, 0xc0, 0x18, 0xfa, 0x18,
- 0x6e, 0xad, 0x83, 0xcc, 0xa8, 0xcf, 0x09, 0xb3, 0x8c, 0x8e, 0xd6, 0x45, 0x78, 0x1d, 0xfb, 0xa9,
- 0x44, 0x37, 0x0c, 0x25, 0x3b, 0x66, 0x41, 0x47, 0xeb, 0x2a, 0x85, 0xa1, 0xa4, 0xc6, 0x04, 0xad,
- 0x38, 0x62, 0x5e, 0x89, 0x56, 0xe3, 0xdf, 0xd0, 0xca, 0x7d, 0xd6, 0xb4, 0xd6, 0x41, 0x32, 0x5a,
- 0xcd, 0x94, 0x56, 0x0e, 0x17, 0xb4, 0xd6, 0x86, 0x19, 0xad, 0x56, 0x4a, 0x2b, 0x87, 0x33, 0x5a,
- 0x5f, 0x03, 0x24, 0x94, 0x51, 0xee, 0x2e, 0xc4, 0xe9, 0xb7, 0x65, 0x8f, 0x3f, 0x28, 0x53, 0x5a,
- 0xdf, 0x9f, 0x1e, 0x16, 0x76, 0xa7, 0x5e, 0xc8, 0xb1, 0x91, 0xe4, 0xcb, 0xcd, 0x0b, 0x78, 0xeb,
- 0xcd, 0x0b, 0xf8, 0x05, 0x18, 0x6b, 0xaf, 0xcd, 0x4e, 0xad, 0x81, 0xf6, 0xbc, 0x3f, 0x32, 0x15,
- 0xa4, 0x83, 0x3a, 0x18, 0x9a, 0x6a, 0xd1, 0xad, 0xda, 0x61, 0x0d, 0xaa, 0x92, 0xf3, 0x61, 0x13,
- 0xa0, 0x28, 0xbb, 0xfd, 0x04, 0xa0, 0x38, 0x1f, 0x71, 0xf3, 0xa2, 0xcb, 0x4b, 0x46, 0xd3, 0xab,
- 0xbc, 0x8d, 0x33, 0x49, 0xe0, 0x3e, 0x0d, 0xe7, 0x7c, 0x21, 0x6f, 0x70, 0x0b, 0x67, 0x92, 0xfd,
- 0xa7, 0x02, 0x30, 0xf6, 0x02, 0x3a, 0xa2, 0x89, 0x47, 0xd9, 0xbb, 0xf7, 0xdf, 0x3e, 0xd4, 0x98,
- 0x6c, 0x7d, 0x66, 0xa9, 0xd2, 0x03, 0x95, 0x3d, 0xd2, 0x57, 0x21, 0x73, 0xc9, 0x0d, 0xd1, 0x97,
- 0x60, 0xd0, 0xac, 0xe1, 0x99, 0xa5, 0x49, 0xaf, 0x9d, 0xb2, 0x57, 0xfe, 0x1a, 0x64, 0x7e, 0x85,
- 0x31, 0xfa, 0x0a, 0x60, 0x91, 0x1f, 0x3c, 0xb3, 0x2a, 0xd2, 0xf5, 0xce, 0x5b, 0xcb, 0x92, 0xf9,
- 0x96, 0xcc, 0xed, 0xbf, 0x14, 0xb8, 0xed, 0x78, 0xa1, 0x17, 0x78, 0xaf, 0xe8, 0xac, 0xb4, 0xe7,
- 0x4f, 0xa0, 0x25, 0x37, 0xe3, 0xb2, 0x55, 0x30, 0x89, 0xb2, 0xad, 0xb7, 0xb2, 0x00, 0x4d, 0xa9,
- 0x1a, 0xa5, 0x9a, 0xff, 0xd3, 0x6e, 0x1f, 0x41, 0x55, 0xd6, 0x4b, 0xcc, 0x0a, 0x39, 0x5f, 0x94,
- 0x74, 0x56, 0x88, 0xf5, 0xe6, 0xab, 0x69, 0x64, 0xaf, 0xa6, 0xfd, 0x18, 0xf4, 0xf3, 0xb4, 0xaa,
- 0xef, 0x7a, 0x0d, 0xec, 0x1f, 0x15, 0x68, 0x4a, 0xdc, 0x21, 0x7c, 0xba, 0xa0, 0x09, 0x7a, 0xb4,
- 0x31, 0x1e, 0xef, 0xdf, 0xf0, 0xcf, 0xec, 0x7a, 0xa5, 0xb1, 0x98, 0x13, 0x55, 0xdf, 0x46, 0x54,
- 0x2b, 0x13, 0xed, 0x42, 0x45, 0x0e, 0x39, 0x1d, 0xd4, 0xfe, 0xb3, 0xb4, 0x6b, 0x06, 0xfd, 0x67,
- 0x69, 0xd7, 0x60, 0x31, 0xd8, 0x04, 0x80, 0xfb, 0xa6, 0x66, 0xff, 0xac, 0x88, 0x56, 0x23, 0x33,
- 0xd1, 0x69, 0x0c, 0xbd, 0x07, 0x35, 0xc6, 0x69, 0xec, 0x06, 0x4c, 0xf2, 0xd2, 0xb0, 0x2e, 0x44,
- 0x87, 0x89, 0xd4, 0x97, 0xcb, 0x70, 0x9a, 0xa7, 0x16, 0x6b, 0xf4, 0x3e, 0xd4, 0x19, 0x27, 0x09,
- 0x17, 0xd6, 0xe9, 0x08, 0xa9, 0x49, 0xd9, 0x61, 0xe8, 0x0e, 0xe8, 0x34, 0x9c, 0xb9, 0xb2, 0x28,
- 0x42, 0x51, 0xa5, 0xe1, 0xcc, 0x61, 0xe8, 0x1e, 0xd4, 0xe7, 0x49, 0xb4, 0x8c, 0xbd, 0x70, 0x6e,
- 0x55, 0x3b, 0x5a, 0xd7, 0xc0, 0x6b, 0x19, 0xb5, 0x41, 0x9d, 0xac, 0xe4, 0x33, 0x5e, 0xc7, 0xea,
- 0x64, 0x25, 0xa2, 0x27, 0x24, 0x9c, 0x53, 0x11, 0xa4, 0x96, 0x46, 0x97, 0xb2, 0xc3, 0xec, 0xdf,
- 0x14, 0xa8, 0x1e, 0x2d, 0x96, 0xe1, 0x0b, 0xb4, 0x0b, 0x8d, 0xc0, 0x0b, 0x5d, 0xf1, 0x70, 0x14,
- 0x9c, 0x8d, 0xc0, 0x0b, 0xc5, 0xed, 0x75, 0x98, 0xd4, 0x93, 0xab, 0xb5, 0x3e, 0x9b, 0xac, 0x01,
- 0xb9, 0xca, 0xf4, 0xbd, 0xac, 0x08, 0x9a, 0x2c, 0xc2, 0xbd, 0x72, 0x11, 0x64, 0x82, 0x5e, 0x3f,
- 0x9c, 0x46, 0x33, 0x2f, 0x9c, 0x17, 0x15, 0x10, 0x3f, 0x16, 0xb9, 0xab, 0x26, 0x96, 0x6b, 0xfb,
- 0x29, 0xd4, 0x73, 0xab, 0x1b, 0x4f, 0xd5, 0x77, 0x43, 0xf1, 0xa1, 0xd8, 0xf8, 0x45, 0xa8, 0xe8,
- 0x36, 0xdc, 0x3a, 0x3e, 0x1f, 0x1e, 0x8c, 0xdd, 0xd2, 0xd7, 0xc2, 0xfe, 0x01, 0x5a, 0x32, 0x23,
- 0x9d, 0xfd, 0xd7, 0x87, 0x66, 0x0f, 0xf4, 0xa9, 0x88, 0x90, 0x77, 0xde, 0xf6, 0x8d, 0xdd, 0xe4,
- 0x0e, 0xa9, 0xd9, 0xe1, 0xce, 0xeb, 0xeb, 0x5d, 0xe5, 0xd7, 0xeb, 0x5d, 0xe5, 0x8f, 0xeb, 0x5d,
- 0xe5, 0x7b, 0x5d, 0x58, 0xc7, 0x93, 0x89, 0x2e, 0x3f, 0x74, 0x9f, 0xff, 0x1d, 0x00, 0x00, 0xff,
- 0xff, 0x7c, 0x73, 0x0c, 0xa7, 0x01, 0x0a, 0x00, 0x00,
+ // 1150 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x56, 0xcb, 0x6e, 0xdb, 0x46,
+ 0x17, 0x36, 0x49, 0x89, 0x12, 0x8f, 0x2e, 0xa1, 0x27, 0x8e, 0x7f, 0xfe, 0x46, 0xe3, 0xa8, 0x2c,
+ 0xd2, 0xaa, 0x45, 0x21, 0x23, 0x6e, 0x17, 0x0d, 0x1a, 0x14, 0xb0, 0x1d, 0xf9, 0x82, 0x9a, 0x12,
+ 0x32, 0x92, 0xd1, 0xa6, 0x1b, 0x61, 0x24, 0x8d, 0x25, 0x22, 0xbc, 0x95, 0x33, 0x0a, 0xac, 0xbc,
+ 0x47, 0x77, 0x7d, 0x89, 0xbe, 0x45, 0x80, 0x6e, 0xda, 0x17, 0x28, 0x0a, 0xaf, 0x02, 0xf4, 0x25,
+ 0x8a, 0x19, 0x92, 0x22, 0x15, 0xbb, 0x40, 0xd3, 0x5d, 0x76, 0x73, 0xbe, 0x73, 0xfb, 0xce, 0xcc,
+ 0x99, 0x39, 0x03, 0x35, 0xbe, 0x8c, 0x28, 0xeb, 0x44, 0x71, 0xc8, 0x43, 0x04, 0x51, 0x1c, 0xfa,
+ 0x94, 0xcf, 0xe9, 0x82, 0xed, 0x6c, 0xcd, 0xc2, 0x59, 0x28, 0xe1, 0x3d, 0xb1, 0x4a, 0x2c, 0xec,
+ 0x9f, 0x55, 0x68, 0x3a, 0x94, 0xc7, 0xee, 0xc4, 0xa1, 0x9c, 0x4c, 0x09, 0x27, 0xe8, 0x31, 0x94,
+ 0x44, 0x0c, 0x4b, 0x69, 0x29, 0xed, 0xe6, 0xfe, 0xc3, 0x4e, 0x1e, 0xa3, 0xb3, 0x6e, 0x99, 0x8a,
+ 0xc3, 0x65, 0x44, 0xb1, 0x74, 0x41, 0x9f, 0x03, 0xf2, 0x25, 0x36, 0xba, 0x24, 0xbe, 0xeb, 0x2d,
+ 0x47, 0x01, 0xf1, 0xa9, 0xa5, 0xb6, 0x94, 0xb6, 0x81, 0xcd, 0x44, 0x73, 0x2c, 0x15, 0x3d, 0xe2,
+ 0x53, 0x84, 0xa0, 0x34, 0xa7, 0x5e, 0x64, 0x95, 0xa4, 0x5e, 0xae, 0x05, 0xb6, 0x08, 0x5c, 0x6e,
+ 0x95, 0x13, 0x4c, 0xac, 0xed, 0x25, 0x40, 0x9e, 0x09, 0xd5, 0xa0, 0x72, 0xd1, 0xfb, 0xb6, 0xd7,
+ 0xff, 0xae, 0x67, 0x6e, 0x08, 0xe1, 0xa8, 0x7f, 0xd1, 0x1b, 0x76, 0xb1, 0xa9, 0x20, 0x03, 0xca,
+ 0x27, 0x07, 0x17, 0x27, 0x5d, 0x53, 0x45, 0x0d, 0x30, 0x4e, 0xcf, 0x06, 0xc3, 0xfe, 0x09, 0x3e,
+ 0x70, 0x4c, 0x0d, 0x21, 0x68, 0x4a, 0x4d, 0x8e, 0x95, 0x84, 0xeb, 0xe0, 0xc2, 0x71, 0x0e, 0xf0,
+ 0x73, 0xb3, 0x8c, 0xaa, 0x50, 0x3a, 0xeb, 0x1d, 0xf7, 0x4d, 0x1d, 0xd5, 0xa1, 0x3a, 0x18, 0x1e,
+ 0x0c, 0xbb, 0x83, 0xee, 0xd0, 0xac, 0xd8, 0x4f, 0x40, 0x1f, 0x10, 0x3f, 0xf2, 0x28, 0xda, 0x82,
+ 0xf2, 0x4b, 0xe2, 0x2d, 0x92, 0x6d, 0x51, 0x70, 0x22, 0xa0, 0x0f, 0xc0, 0xe0, 0xae, 0x4f, 0x19,
+ 0x27, 0x7e, 0x24, 0xeb, 0xd4, 0x70, 0x0e, 0xd8, 0x21, 0x54, 0xbb, 0x57, 0xd4, 0x8f, 0x3c, 0x12,
+ 0xa3, 0x3d, 0xd0, 0x3d, 0x32, 0xa6, 0x1e, 0xb3, 0x94, 0x96, 0xd6, 0xae, 0xed, 0x6f, 0x16, 0xf7,
+ 0xf5, 0x5c, 0x68, 0x0e, 0x4b, 0xaf, 0xff, 0x78, 0xb0, 0x81, 0x53, 0xb3, 0x3c, 0xa1, 0xfa, 0x8f,
+ 0x09, 0xb5, 0xb7, 0x13, 0xfe, 0x5a, 0x06, 0xe3, 0xd4, 0x65, 0x3c, 0x9c, 0xc5, 0xc4, 0x47, 0xf7,
+ 0xc1, 0x98, 0x84, 0x8b, 0x80, 0x8f, 0xdc, 0x80, 0x4b, 0xda, 0xa5, 0xd3, 0x0d, 0x5c, 0x95, 0xd0,
+ 0x59, 0xc0, 0xd1, 0x87, 0x50, 0x4b, 0xd4, 0x97, 0x5e, 0x48, 0x78, 0x92, 0xe6, 0x74, 0x03, 0x83,
+ 0x04, 0x8f, 0x05, 0x86, 0x4c, 0xd0, 0xd8, 0xc2, 0x97, 0x79, 0x14, 0x2c, 0x96, 0x68, 0x1b, 0x74,
+ 0x36, 0x99, 0x53, 0x9f, 0xc8, 0x53, 0xdb, 0xc4, 0xa9, 0x84, 0x1e, 0x42, 0xf3, 0x15, 0x8d, 0xc3,
+ 0x11, 0x9f, 0xc7, 0x94, 0xcd, 0x43, 0x6f, 0x2a, 0x4f, 0x50, 0xc1, 0x0d, 0x81, 0x0e, 0x33, 0x10,
+ 0x7d, 0x9c, 0x9a, 0xe5, 0xbc, 0x74, 0xc9, 0x4b, 0xc1, 0x75, 0x81, 0x1f, 0x65, 0xdc, 0x3e, 0x03,
+ 0xb3, 0x60, 0x97, 0x10, 0xac, 0x48, 0x82, 0x0a, 0x6e, 0xae, 0x2c, 0x13, 0x92, 0x47, 0xd0, 0x0c,
+ 0xe8, 0x8c, 0x70, 0xf7, 0x25, 0x1d, 0xb1, 0x88, 0x04, 0xcc, 0xaa, 0xca, 0x1d, 0xde, 0x2e, 0xee,
+ 0xf0, 0xe1, 0x62, 0xf2, 0x82, 0xf2, 0x41, 0x44, 0x82, 0x74, 0x9b, 0x1b, 0x99, 0x8f, 0xc0, 0x18,
+ 0xfa, 0x04, 0xee, 0xac, 0x82, 0x4c, 0xa9, 0xc7, 0x09, 0xb3, 0x8c, 0x96, 0xd6, 0x46, 0x78, 0x15,
+ 0xfb, 0xa9, 0x44, 0xd7, 0x0c, 0x25, 0x3b, 0x66, 0x41, 0x4b, 0x6b, 0x2b, 0xb9, 0xa1, 0xa4, 0xc6,
+ 0x04, 0xad, 0x28, 0x64, 0x6e, 0x81, 0x56, 0xed, 0xdf, 0xd0, 0xca, 0x7c, 0x56, 0xb4, 0x56, 0x41,
+ 0x52, 0x5a, 0xf5, 0x84, 0x56, 0x06, 0xe7, 0xb4, 0x56, 0x86, 0x29, 0xad, 0x46, 0x42, 0x2b, 0x83,
+ 0x53, 0x5a, 0xdf, 0x00, 0xc4, 0x94, 0x51, 0x3e, 0x9a, 0x8b, 0xdd, 0x6f, 0xca, 0x3b, 0xfe, 0xa0,
+ 0x48, 0x69, 0xd5, 0x3f, 0x1d, 0x2c, 0xec, 0x4e, 0xdd, 0x80, 0x63, 0x23, 0xce, 0x96, 0xeb, 0x0d,
+ 0x78, 0xe7, 0xed, 0x06, 0xfc, 0x12, 0x8c, 0x95, 0xd7, 0xfa, 0x4d, 0xad, 0x80, 0xf6, 0xbc, 0x3b,
+ 0x30, 0x15, 0xa4, 0x83, 0xda, 0xeb, 0x9b, 0x6a, 0x7e, 0x5b, 0xb5, 0xc3, 0x0a, 0x94, 0x25, 0xe7,
+ 0xc3, 0x3a, 0x40, 0x7e, 0xec, 0xf6, 0x13, 0x80, 0x7c, 0x7f, 0x44, 0xe7, 0x85, 0x97, 0x97, 0x8c,
+ 0x26, 0xad, 0xbc, 0x89, 0x53, 0x49, 0xe0, 0x1e, 0x0d, 0x66, 0x7c, 0x2e, 0x3b, 0xb8, 0x81, 0x53,
+ 0xc9, 0x7e, 0xa3, 0x00, 0x0c, 0x5d, 0x9f, 0x0e, 0x68, 0xec, 0x52, 0xf6, 0xee, 0xf7, 0x6f, 0x1f,
+ 0x2a, 0x4c, 0x5e, 0x7d, 0x66, 0xa9, 0xd2, 0x03, 0x15, 0x3d, 0x92, 0x57, 0x21, 0x75, 0xc9, 0x0c,
+ 0xd1, 0x57, 0x60, 0xd0, 0xf4, 0xc2, 0x33, 0x4b, 0x93, 0x5e, 0x5b, 0x45, 0xaf, 0xec, 0x35, 0x48,
+ 0xfd, 0x72, 0x63, 0xf4, 0x35, 0xc0, 0x3c, 0xdb, 0x78, 0x66, 0x95, 0xa4, 0xeb, 0xbd, 0x5b, 0x8f,
+ 0x25, 0xf5, 0x2d, 0x98, 0xdb, 0x7f, 0x29, 0x70, 0xd7, 0x71, 0x03, 0xd7, 0x77, 0x5f, 0xd1, 0x69,
+ 0xa1, 0xe6, 0x4f, 0xa1, 0x21, 0x8b, 0x19, 0xb1, 0xa5, 0x3f, 0x0e, 0xd3, 0xd2, 0x1b, 0x69, 0x80,
+ 0xba, 0x54, 0x0d, 0x12, 0xcd, 0xfb, 0x54, 0xed, 0x1b, 0x05, 0xb6, 0x6f, 0xa9, 0xf6, 0x9c, 0x06,
+ 0xe8, 0xa3, 0xdb, 0x0a, 0xae, 0xbc, 0xbf, 0xa5, 0x3e, 0x82, 0xb2, 0x6c, 0x4d, 0x31, 0x16, 0xe5,
+ 0x28, 0x55, 0x92, 0xb1, 0x28, 0xd6, 0xeb, 0x03, 0xc2, 0x48, 0x07, 0x84, 0xfd, 0x18, 0xf4, 0xf3,
+ 0xa4, 0x81, 0xdf, 0xb5, 0xe3, 0xed, 0x9f, 0x14, 0xa8, 0x4b, 0xdc, 0x21, 0x7c, 0x32, 0xa7, 0x31,
+ 0x7a, 0xb4, 0xf6, 0x13, 0xb8, 0x7f, 0xc3, 0x3f, 0xb5, 0xeb, 0x14, 0x7e, 0x00, 0x19, 0x51, 0xf5,
+ 0x36, 0xa2, 0x5a, 0x91, 0x68, 0x1b, 0x4a, 0x72, 0x9e, 0xeb, 0xa0, 0x76, 0x9f, 0x25, 0x0f, 0x44,
+ 0xaf, 0xfb, 0x2c, 0x79, 0x20, 0xb0, 0x98, 0xe1, 0x02, 0xc0, 0x5d, 0x53, 0xb3, 0x7f, 0x51, 0xc4,
+ 0xab, 0x42, 0xa6, 0xe2, 0x51, 0x61, 0xe8, 0x7f, 0x50, 0x61, 0x9c, 0x46, 0x23, 0x9f, 0x49, 0x5e,
+ 0x1a, 0xd6, 0x85, 0xe8, 0x30, 0x91, 0xfa, 0x72, 0x11, 0x4c, 0xb2, 0xd4, 0x62, 0x8d, 0xfe, 0x0f,
+ 0x55, 0xc6, 0x49, 0xcc, 0x85, 0x75, 0x32, 0x2d, 0x2b, 0x52, 0x76, 0x18, 0xba, 0x07, 0x3a, 0x0d,
+ 0xa6, 0x23, 0x79, 0x28, 0x42, 0x51, 0xa6, 0xc1, 0xd4, 0x61, 0x68, 0x07, 0xaa, 0xb3, 0x38, 0x5c,
+ 0x44, 0x6e, 0x30, 0xb3, 0xca, 0x2d, 0xad, 0x6d, 0xe0, 0x95, 0x8c, 0x9a, 0xa0, 0x8e, 0x97, 0x72,
+ 0x62, 0x55, 0xb1, 0x3a, 0x5e, 0x8a, 0xe8, 0x31, 0x09, 0x66, 0x54, 0x04, 0xa9, 0x24, 0xd1, 0xa5,
+ 0xec, 0x30, 0xfb, 0x77, 0x05, 0xca, 0x47, 0xf3, 0x45, 0xf0, 0x02, 0xed, 0x42, 0xcd, 0x77, 0x83,
+ 0x91, 0x78, 0x23, 0x73, 0xce, 0x86, 0xef, 0x06, 0xa2, 0x75, 0x1d, 0x26, 0xf5, 0xe4, 0x6a, 0xa5,
+ 0x4f, 0x3f, 0x11, 0x3e, 0xb9, 0x4a, 0xf5, 0x9d, 0xf4, 0x10, 0x34, 0x79, 0x08, 0x3b, 0xc5, 0x43,
+ 0x90, 0x09, 0x3a, 0xdd, 0x60, 0x12, 0x4e, 0xdd, 0x60, 0x96, 0x9f, 0x80, 0xf8, 0x9c, 0xc9, 0xaa,
+ 0xea, 0x58, 0xae, 0xed, 0xa7, 0x50, 0xcd, 0xac, 0x6e, 0xbc, 0xca, 0xdf, 0xf7, 0xc5, 0xdf, 0x69,
+ 0xed, 0xc3, 0xa4, 0xa2, 0xbb, 0x70, 0xe7, 0xf8, 0xbc, 0x7f, 0x30, 0x1c, 0x15, 0x7e, 0x51, 0xf6,
+ 0x8f, 0xd0, 0x90, 0x19, 0xe9, 0xf4, 0xbf, 0xbe, 0xa9, 0x7b, 0xa0, 0x4f, 0x44, 0x84, 0xec, 0xe6,
+ 0x6d, 0xde, 0xa8, 0x26, 0x73, 0x48, 0xcc, 0x0e, 0xb7, 0x5e, 0x5f, 0xef, 0x2a, 0xbf, 0x5d, 0xef,
+ 0x2a, 0x7f, 0x5e, 0xef, 0x2a, 0x3f, 0xe8, 0xc2, 0x3a, 0x1a, 0x8f, 0x75, 0xf9, 0x77, 0xfd, 0xe2,
+ 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x89, 0xa1, 0xff, 0x4a, 0xec, 0x0a, 0x00, 0x00,
}
func (m *MetricMetadata) Marshal() (dAtA []byte, err error) {
@@ -1855,6 +1934,84 @@ func (m *MinimizedTimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *MinimizedTimeSeriesLen) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MinimizedTimeSeriesLen) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MinimizedTimeSeriesLen) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Histograms) > 0 {
+ for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.Exemplars) > 0 {
+ for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Samples) > 0 {
+ for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.LabelSymbols) > 0 {
+ for iNdEx := len(m.LabelSymbols) - 1; iNdEx >= 0; iNdEx-- {
+ i -= 4
+ encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(m.LabelSymbols[iNdEx]))
+ }
+ i = encodeVarintTypes(dAtA, i, uint64(len(m.LabelSymbols)*4))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *Label) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -2432,6 +2589,39 @@ func (m *MinimizedTimeSeries) Size() (n int) {
return n
}
+func (m *MinimizedTimeSeriesLen) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.LabelSymbols) > 0 {
+ n += 1 + sovTypes(uint64(len(m.LabelSymbols)*4)) + len(m.LabelSymbols)*4
+ }
+ if len(m.Samples) > 0 {
+ for _, e := range m.Samples {
+ l = e.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ }
+ if len(m.Exemplars) > 0 {
+ for _, e := range m.Exemplars {
+ l = e.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ }
+ if len(m.Histograms) > 0 {
+ for _, e := range m.Histograms {
+ l = e.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
func (m *Label) Size() (n int) {
if m == nil {
return 0
@@ -3980,6 +4170,211 @@ func (m *MinimizedTimeSeries) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *MinimizedTimeSeriesLen) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MinimizedTimeSeriesLen: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MinimizedTimeSeriesLen: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType == 5 {
+ var v uint32
+ if (iNdEx + 4) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:]))
+ iNdEx += 4
+ m.LabelSymbols = append(m.LabelSymbols, v)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var elementCount int
+ elementCount = packedLen / 4
+ if elementCount != 0 && len(m.LabelSymbols) == 0 {
+ m.LabelSymbols = make([]uint32, 0, elementCount)
+ }
+ for iNdEx < postIndex {
+ var v uint32
+ if (iNdEx + 4) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:]))
+ iNdEx += 4
+ m.LabelSymbols = append(m.LabelSymbols, v)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field LabelSymbols", wireType)
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Samples = append(m.Samples, Sample{})
+ if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Exemplars = append(m.Exemplars, Exemplar{})
+ if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Histograms = append(m.Histograms, Histogram{})
+ if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *Label) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
diff --git a/prompb/types.proto b/prompb/types.proto
index d436004ebb..5cc053630c 100644
--- a/prompb/types.proto
+++ b/prompb/types.proto
@@ -134,6 +134,7 @@ message TimeSeries {
message MinimizedTimeSeries {
// Sorted list of label name-value pair references. This list's len is always multiple of 4,
// packing tuples of (label name offset, label name length, label value offset, label value length).
+ // Offsets point to the symbol table in the higher level MinimizedWriteRequest.
repeated uint32 label_symbols = 1 [(gogoproto.nullable) = false];
// Sorted by time, oldest sample first.
@@ -144,6 +145,20 @@ message MinimizedTimeSeries {
// TODO: add metadata
}
+message MinimizedTimeSeriesLen {
+ // Sorted list of label name-value pair references, each encoded as a 32-bit uint. This
+ // list's real length is always a multiple of 2: (label name offset, label value offset).
+ // Offsets point to the symbol table in the higher level MinimizedWriteRequestLen.
+ repeated fixed32 label_symbols = 1;
+
+ // Sorted by time, oldest sample first.
+ // TODO: support references for other types
+ repeated Sample samples = 2 [(gogoproto.nullable) = false];
+ repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false];
+ repeated Histogram histograms = 4 [(gogoproto.nullable) = false];
+ // TODO: add metadata
+}
+
message Label {
string name = 1;
string value = 2;
diff --git a/scripts/genproto.sh b/scripts/genproto.sh
index 86e2135793..a21a53fa2f 100755
--- a/scripts/genproto.sh
+++ b/scripts/genproto.sh
@@ -10,10 +10,10 @@ if ! [[ "$0" =~ "scripts/genproto.sh" ]]; then
exit 255
fi
-if ! [[ $(protoc --version) =~ "3.21.12" ]]; then
- echo "could not find protoc 3.21.12, is it installed + in PATH?"
- exit 255
-fi
+#if ! [[ $(protoc --version) =~ "3.21.12" ]]; then
+# echo "could not find protoc 3.21.12, is it installed + in PATH?"
+# exit 255
+#fi
# Since we run go install, go mod download, the go.sum will change.
# Make a backup.
diff --git a/scripts/remotewrite11-bench/run.sh b/scripts/remotewrite11-bench/run.sh
index 235c40f400..431be4c658 100755
--- a/scripts/remotewrite11-bench/run.sh
+++ b/scripts/remotewrite11-bench/run.sh
@@ -7,7 +7,8 @@ trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT
declare -a INSTANCES
# (sender,receiver) pairs to run: (sender_name; sender_flags; receiver_name; receiver_flags)
INSTANCES+=('sender-v1;;receiver-v1;')
-INSTANCES+=('sender-v11;--enable-feature rw-1-1-sender;receiver-v11;--enable-feature rw-1-1-receiver')
+INSTANCES+=('sender-v11-min32-optimized-varint;--remote-write-format 1;receiver-v11-min32-optimized-varint;--remote-write-format 1')
+INSTANCES+=('sender-v11-min-len;--remote-write-format 2;receiver-v11-min-len;--remote-write-format 2')
# ~~~~~~~~~~~~~
diff --git a/scripts/remotewrite11-bench/sender-template.yml b/scripts/remotewrite11-bench/sender-template.yml
index 3db9e7082c..a26c5d810f 100644
--- a/scripts/remotewrite11-bench/sender-template.yml
+++ b/scripts/remotewrite11-bench/sender-template.yml
@@ -1,5 +1,5 @@
global:
- scrape_interval: 15s
+ scrape_interval: 5s
external_labels:
role: ${SENDER_NAME}
@@ -8,6 +8,8 @@ remote_write:
name: ${RECEIVER_NAME}
metadata_config:
send: false
+ queue_config:
+ max_samples_per_send: 5000
scrape_configs:
${SCRAPE_CONFIGS}
\ No newline at end of file
diff --git a/storage/remote/client.go b/storage/remote/client.go
index 7f0615577a..57c6d8e59e 100644
--- a/storage/remote/client.go
+++ b/storage/remote/client.go
@@ -81,11 +81,11 @@ func init() {
// Client allows reading and writing from/to a remote HTTP endpoint.
type Client struct {
- remoteName string // Used to differentiate clients in metrics.
- urlString string // url.String()
- remoteWrite11 bool // For write clients, ignored for read clients.
- Client *http.Client
- timeout time.Duration
+ remoteName string // Used to differentiate clients in metrics.
+ urlString string // url.String()
+ rwFormat RemoteWriteFormat // For write clients, ignored for read clients.
+ Client *http.Client
+ timeout time.Duration
retryOnRateLimit bool
@@ -96,14 +96,14 @@ type Client struct {
// ClientConfig configures a client.
type ClientConfig struct {
- URL *config_util.URL
- RemoteWrite11 bool
- Timeout model.Duration
- HTTPClientConfig config_util.HTTPClientConfig
- SigV4Config *sigv4.SigV4Config
- AzureADConfig *azuread.AzureADConfig
- Headers map[string]string
- RetryOnRateLimit bool
+ URL *config_util.URL
+ RemoteWriteFormat RemoteWriteFormat
+ Timeout model.Duration
+ HTTPClientConfig config_util.HTTPClientConfig
+ SigV4Config *sigv4.SigV4Config
+ AzureADConfig *azuread.AzureADConfig
+ Headers map[string]string
+ RetryOnRateLimit bool
}
// ReadClient uses the SAMPLES method of remote read to read series samples from remote server.
@@ -165,7 +165,7 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
httpClient.Transport = otelhttp.NewTransport(t)
return &Client{
- remoteWrite11: conf.RemoteWrite11,
+ rwFormat: conf.RemoteWriteFormat,
remoteName: name,
urlString: conf.URL.String(),
Client: httpClient,
@@ -211,11 +211,11 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) error {
httpReq.Header.Set("Content-Type", "application/x-protobuf")
httpReq.Header.Set("User-Agent", UserAgent)
- // Set the right header if we're using v1.1 remote write protocol
- if c.remoteWrite11 {
- httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion11HeaderValue)
- } else {
+ if c.rwFormat == Base1 {
httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion1HeaderValue)
+ } else {
+ // Set the right header if we're using v1.1 remote write protocol
+ httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion11HeaderValue)
}
if attempt > 0 {
diff --git a/storage/remote/codec.go b/storage/remote/codec.go
index b11850e552..80afcdd368 100644
--- a/storage/remote/codec.go
+++ b/storage/remote/codec.go
@@ -15,6 +15,7 @@ package remote
import (
"compress/gzip"
+ "encoding/binary"
"errors"
"fmt"
"io"
@@ -23,6 +24,7 @@ import (
"sort"
"strings"
"sync"
+ "unsafe"
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
@@ -752,15 +754,6 @@ func spansToSpansProto(s []histogram.Span) []prompb.BucketSpan {
return spans
}
-// LabelProtosToMetric unpack a []*prompb.Label to a model.Metric
-func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
- metric := make(model.Metric, len(labelPairs))
- for _, l := range labelPairs {
- metric[model.LabelName(l.Name)] = model.LabelValue(l.Value)
- }
- return metric
-}
-
func labelProtosToLabels(labelPairs []prompb.Label) labels.Labels {
b := labels.ScratchBuilder{}
for _, l := range labelPairs {
@@ -794,6 +787,44 @@ func labelsToUint32Slice(lbls labels.Labels, symbolTable *rwSymbolTable, buf []u
return result
}
+func labelsToUint32SliceLen(lbls labels.Labels, symbolTable *rwSymbolTable, buf []uint32) []uint32 {
+ result := buf[:0]
+ lbls.Range(func(l labels.Label) {
+ off := symbolTable.RefLen(l.Name)
+ result = append(result, off)
+ off = symbolTable.RefLen(l.Value)
+ result = append(result, off)
+ })
+ return result
+}
+
+func Uint32LenRefToLabels(symbols []byte, minLabels []uint32) labels.Labels {
+ ls := labels.NewScratchBuilder(len(minLabels) / 2)
+
+ labelIdx := 0
+ for labelIdx < len(minLabels) {
+ // TODO(review): bounds-check offset and offset+length against len(symbols) before slicing to avoid a panic on malformed input.
+ offset := minLabels[labelIdx]
+ labelIdx++
+ length, n := binary.Uvarint(symbols[offset:])
+ offset += uint32(n)
+ name := symbols[offset : uint64(offset)+length]
+
+ offset = minLabels[labelIdx]
+ labelIdx++
+ length, n = binary.Uvarint(symbols[offset:])
+ offset += uint32(n)
+ value := symbols[offset : uint64(offset)+length]
+ ls.Add(yoloString(name), yoloString(value))
+ }
+
+ return ls.Labels()
+}
+
+func yoloString(b []byte) string {
+ return *(*string)(unsafe.Pointer(&b))
+}
+
func Uint32RefToLabels(symbols string, minLabels []uint32) labels.Labels {
ls := labels.NewScratchBuilder(len(minLabels) / 2)
@@ -923,10 +954,29 @@ func DecodeMinimizedWriteRequest(r io.Reader) (*prompb.MinimizedWriteRequest, er
return &req, nil
}
+func DecodeMinimizedWriteRequestLen(r io.Reader) (*prompb.MinimizedWriteRequestLen, error) {
+ compressed, err := io.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ reqBuf, err := snappy.Decode(nil, compressed)
+ if err != nil {
+ return nil, err
+ }
+
+ var req prompb.MinimizedWriteRequestLen
+ if err := proto.Unmarshal(reqBuf, &req); err != nil {
+ return nil, err
+ }
+
+ return &req, nil
+}
+
func MinimizedWriteRequestToWriteRequest(redReq *prompb.MinimizedWriteRequest) (*prompb.WriteRequest, error) {
req := &prompb.WriteRequest{
Timeseries: make([]prompb.TimeSeries, len(redReq.Timeseries)),
- //Metadata: redReq.Metadata,
+ // TODO handle metadata?
}
for i, rts := range redReq.Timeseries {
@@ -951,12 +1001,3 @@ func MinimizedWriteRequestToWriteRequest(redReq *prompb.MinimizedWriteRequest) (
}
return req, nil
}
-
-// for use with minimized remote write proto format
-func packRef(offset, length int) uint32 {
- return uint32((offset&0xFFFFF)<<12 | (length & 0xFFF))
-}
-
-func unpackRef(ref uint32) (offset, length int) {
- return int(ref>>12) & 0xFFFFF, int(ref & 0xFFF)
-}
diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go
index a81d91dd53..03ab16aac0 100644
--- a/storage/remote/codec_test.go
+++ b/storage/remote/codec_test.go
@@ -77,7 +77,7 @@ var writeRequestFixture = &prompb.WriteRequest{
// writeRequestMinimizedFixture represents the same request as writeRequestFixture, but using the minimized representation.
var writeRequestMinimizedFixture = func() *prompb.MinimizedWriteRequest {
st := newRwSymbolTable()
- labels := []uint32{}
+ var labels []uint32
for _, s := range []string{
"__name__", "test_metric1",
"b", "c",
@@ -85,8 +85,8 @@ var writeRequestMinimizedFixture = func() *prompb.MinimizedWriteRequest {
"d", "e",
"foo", "bar",
} {
- off, len := st.Ref(s)
- labels = append(labels, off, len)
+ off, length := st.Ref(s)
+ labels = append(labels, off, length)
}
return &prompb.MinimizedWriteRequest{
Timeseries: []prompb.MinimizedTimeSeries{
@@ -568,13 +568,6 @@ func TestDecodeMinWriteRequest(t *testing.T) {
require.Equal(t, writeRequestMinimizedFixture, actual)
}
-func TestMinimizedWriteRequestToWriteRequest(t *testing.T) {
- actual, err := MinimizedWriteRequestToWriteRequest(writeRequestMinimizedFixture)
- require.NoError(t, err)
-
- require.Equal(t, writeRequestFixture, actual)
-}
-
func TestNilHistogramProto(t *testing.T) {
// This function will panic if it impromperly handles nil
// values, causing the test to fail.
@@ -893,3 +886,11 @@ func (c *mockChunkIterator) Next() bool {
func (c *mockChunkIterator) Err() error {
return nil
}
+
+func TestLenFormat(t *testing.T) {
+ r := newRwSymbolTable()
+ ls := labels.FromStrings("asdf", "qwer", "zxcv", "1234")
+ encoded := labelsToUint32SliceLen(ls, &r, nil)
+ decoded := Uint32LenRefToLabels(r.LabelsData(), encoded)
+ require.Equal(t, ls, decoded)
+}
diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go
index 0d24ee72d8..9ca4ebcbe7 100644
--- a/storage/remote/queue_manager.go
+++ b/storage/remote/queue_manager.go
@@ -15,6 +15,7 @@ package remote
import (
"context"
+ "encoding/binary"
"errors"
"math"
"strconv"
@@ -25,7 +26,6 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/gogo/protobuf/proto"
-
"github.com/golang/snappy"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
@@ -389,6 +389,14 @@ type WriteClient interface {
Endpoint() string
}
+type RemoteWriteFormat int64
+
+const (
+ Base1 RemoteWriteFormat = iota // original map based format
+ Min32Optimized // two 32-bit varints per label (offset and length), plus a marshalling optimization
+ MinLen // symbols are just offsets; each length is encoded as a varint inside the large symbols buffer (which is now a byte slice)
+)
+
// QueueManager manages a queue of samples to be sent to the Storage
// indicated by the provided WriteClient. Implements writeTo interface
// used by WAL Watcher.
@@ -406,7 +414,7 @@ type QueueManager struct {
watcher *wlog.Watcher
metadataWatcher *MetadataWatcher
// experimental feature, new remote write proto format
- internFormat bool
+ rwFormat RemoteWriteFormat
clientMtx sync.RWMutex
storeClient WriteClient
@@ -454,7 +462,7 @@ func NewQueueManager(
sm ReadyScrapeManager,
enableExemplarRemoteWrite bool,
enableNativeHistogramRemoteWrite bool,
- internFormat bool,
+ rwFormat RemoteWriteFormat,
) *QueueManager {
if logger == nil {
logger = log.NewNopLogger()
@@ -477,7 +485,9 @@ func NewQueueManager(
storeClient: client,
sendExemplars: enableExemplarRemoteWrite,
sendNativeHistograms: enableNativeHistogramRemoteWrite,
- internFormat: internFormat,
+ // TODO: we should eventually set the format via content negotiation,
+ // so this field would be the desired format, maybe with a fallback?
+ rwFormat: rwFormat,
seriesLabels: make(map[chunks.HeadSeriesRef]labels.Labels),
seriesSegmentIndexes: make(map[chunks.HeadSeriesRef]int),
@@ -1276,7 +1286,6 @@ func (q *queue) Chan() <-chan []timeSeries {
func (q *queue) Batch() []timeSeries {
q.batchMtx.Lock()
defer q.batchMtx.Unlock()
-
select {
case batch := <-q.batchQueue:
return batch
@@ -1363,6 +1372,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
max += int(float64(max) * 0.1)
}
+ // TODO: we should make an interface for the timeseries type
batchQueue := queue.Chan()
pendingData := make([]prompb.TimeSeries, max)
for i := range pendingData {
@@ -1377,6 +1387,11 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
pendingMinimizedData[i].Samples = []prompb.Sample{{}}
}
+ pendingMinLenData := make([]prompb.MinimizedTimeSeriesLen, max)
+ for i := range pendingMinLenData {
+ pendingMinLenData[i].Samples = []prompb.Sample{{}}
+ }
+
timer := time.NewTimer(time.Duration(s.qm.cfg.BatchSendDeadline))
stop := func() {
if !timer.Stop() {
@@ -1411,17 +1426,23 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
if !ok {
return
}
- if s.qm.internFormat {
- nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeries(&symbolTable, batch, pendingMinimizedData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
-
- n := nPendingSamples + nPendingExemplars + nPendingHistograms
- s.sendMinSamples(ctx, pendingMinimizedData[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, &pBufRaw, &buf)
- symbolTable.clear()
- } else {
+ switch s.qm.rwFormat {
+ case Base1:
nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
+ case Min32Optimized:
+ nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeries(&symbolTable, batch, pendingMinimizedData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
+ n := nPendingSamples + nPendingExemplars + nPendingHistograms
+ s.sendMinSamples(ctx, pendingMinimizedData[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, &pBufRaw, &buf)
+ symbolTable.clear()
+ case MinLen:
+ nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeriesLen(&symbolTable, batch, pendingMinLenData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
+ n := nPendingSamples + nPendingExemplars + nPendingHistograms
+ s.sendMinLenSamples(ctx, pendingMinLenData[:n], symbolTable.LabelsData(), nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
+ symbolTable.clear()
}
+
queue.ReturnForReuse(batch)
stop()
@@ -1430,18 +1451,27 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
case <-timer.C:
batch := queue.Batch()
if len(batch) > 0 {
- if s.qm.internFormat {
- nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeries(&symbolTable, batch, pendingMinimizedData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
-
- n := nPendingSamples + nPendingExemplars + nPendingHistograms
- s.sendMinSamples(ctx, pendingMinimizedData[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, &pBufRaw, &buf)
- symbolTable.clear()
- } else {
+ switch s.qm.rwFormat {
+ case Base1:
nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
"exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
+ case Min32Optimized:
+ nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeries(&symbolTable, batch, pendingMinimizedData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
+ n := nPendingSamples + nPendingExemplars + nPendingHistograms
+ level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
+ "exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
+ s.sendMinSamples(ctx, pendingMinimizedData[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, &pBufRaw, &buf)
+ symbolTable.clear()
+ case MinLen:
+ nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeriesLen(&symbolTable, batch, pendingMinLenData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
+ n := nPendingSamples + nPendingExemplars + nPendingHistograms
+ level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
+ "exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
+ s.sendMinLenSamples(ctx, pendingMinLenData[:n], symbolTable.LabelsData(), nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
+ symbolTable.clear()
}
}
queue.ReturnForReuse(batch)
@@ -1502,7 +1532,7 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s
s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, time.Since(begin))
}
-func (s *shards) sendMinSamples(ctx context.Context, samples []prompb.MinimizedTimeSeries, labels string, sampleCount, exemplarCount, histogramCount int, pBuf *[]byte, buf *[]byte) {
+func (s *shards) sendMinSamples(ctx context.Context, samples []prompb.MinimizedTimeSeries, labels string, sampleCount, exemplarCount, histogramCount int, pBuf, buf *[]byte) {
begin := time.Now()
// Build the ReducedWriteRequest with no metadata.
// Failing to build the write request is non-recoverable, since it will
@@ -1514,6 +1544,18 @@ func (s *shards) sendMinSamples(ctx context.Context, samples []prompb.MinimizedT
s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, time.Since(begin))
}
+func (s *shards) sendMinLenSamples(ctx context.Context, samples []prompb.MinimizedTimeSeriesLen, labels []byte, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) {
+ begin := time.Now()
+ // Build the MinimizedWriteRequestLen with no metadata.
+ // Failing to build the write request is non-recoverable, since it will
+ // only error if marshaling the proto to bytes fails.
+ req, highest, err := buildMinimizedWriteRequestLen(samples, labels, pBuf, buf)
+ if err == nil {
+ err = s.sendSamplesWithBackoff(ctx, req, sampleCount, exemplarCount, histogramCount, highest)
+ }
+ s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, time.Since(begin))
+}
+
func (s *shards) updateMetrics(ctx context.Context, err error, sampleCount, exemplarCount, histogramCount int, duration time.Duration) {
if err != nil {
level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "err", err)
@@ -1638,6 +1680,42 @@ func populateMinimizedTimeSeries(symbolTable *rwSymbolTable, batch []timeSeries,
return nPendingSamples, nPendingExemplars, nPendingHistograms
}
+func populateMinimizedTimeSeriesLen(symbolTable *rwSymbolTable, batch []timeSeries, pendingData []prompb.MinimizedTimeSeriesLen, sendExemplars, sendNativeHistograms bool) (int, int, int) {
+ var nPendingSamples, nPendingExemplars, nPendingHistograms int
+ for nPending, d := range batch {
+ pendingData[nPending].Samples = pendingData[nPending].Samples[:0]
+ if sendExemplars {
+ pendingData[nPending].Exemplars = pendingData[nPending].Exemplars[:0]
+ }
+ if sendNativeHistograms {
+ pendingData[nPending].Histograms = pendingData[nPending].Histograms[:0]
+ }
+
+ // Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff)
+ // retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll
+ // stop reading from the queue. This makes it safe to reference pendingSamples by index.
+ // pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels)
+
+ pendingData[nPending].LabelSymbols = labelsToUint32SliceLen(d.seriesLabels, symbolTable, pendingData[nPending].LabelSymbols)
+ switch d.sType {
+ case tSample:
+ pendingData[nPending].Samples = append(pendingData[nPending].Samples, prompb.Sample{
+ Value: d.value,
+ Timestamp: d.timestamp,
+ })
+ nPendingSamples++
+ // TODO: handle all exemplars
+ case tHistogram:
+ pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, HistogramToHistogramProto(d.timestamp, d.histogram))
+ nPendingHistograms++
+ case tFloatHistogram:
+ pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, FloatHistogramToHistogramProto(d.timestamp, d.floatHistogram))
+ nPendingHistograms++
+ }
+ }
+ return nPendingSamples, nPendingExemplars, nPendingHistograms
+}
+
func sendWriteRequestWithBackoff(ctx context.Context, cfg config.QueueConfig, l log.Logger, attempt func(int) error, onRetry func()) error {
backoff := cfg.MinBackoff
sleepDuration := model.Duration(0)
@@ -1728,7 +1806,7 @@ func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMeta
buf = &[]byte{}
}
compressed := snappy.Encode(*buf, pBuf.Bytes())
- if n := snappy.MaxEncodedLen(len(pBuf.Bytes())); buf != nil && n > len(*buf) {
+ if n := snappy.MaxEncodedLen(len(pBuf.Bytes())); n > len(*buf) {
// grow the buffer for the next time
*buf = make([]byte, n)
}
@@ -1742,38 +1820,64 @@ type offLenPair struct {
}
type rwSymbolTable struct {
- symbols []byte
- symbolsMap map[string]offLenPair
+ symbols []byte
+ symbolsMap map[string]offLenPair
+ symbolsMapBytes map[string]uint32
}
func newRwSymbolTable() rwSymbolTable {
return rwSymbolTable{
- symbolsMap: make(map[string]offLenPair),
+ symbolsMap: make(map[string]offLenPair),
+ symbolsMapBytes: make(map[string]uint32),
}
}
-func (r *rwSymbolTable) Ref(str string) (off uint32, leng uint32) {
+func (r *rwSymbolTable) Ref(str string) (uint32, uint32) {
if offlen, ok := r.symbolsMap[str]; ok {
return offlen.Off, offlen.Len
}
- off, leng = uint32(len(r.symbols)), uint32(len(str))
+ off, length := uint32(len(r.symbols)), uint32(len(str))
+ if int(off) > len(r.symbols) {
+ panic(1)
+ }
r.symbols = append(r.symbols, str...)
- r.symbolsMap[str] = offLenPair{off, leng}
- return
+ if len(r.symbols) < int(off+length) {
+ panic(2)
+ }
+ r.symbolsMap[str] = offLenPair{off, length}
+ return off, length
+}
+
+func (r *rwSymbolTable) RefLen(str string) uint32 {
+ if ref, ok := r.symbolsMapBytes[str]; ok {
+ return ref
+ }
+ ref := uint32(len(r.symbols))
+ r.symbols = binary.AppendUvarint(r.symbols, uint64(len(str)))
+ r.symbols = append(r.symbols, str...)
+ r.symbolsMapBytes[str] = ref
+ return ref
}
func (r *rwSymbolTable) LabelsString() string {
return *((*string)(unsafe.Pointer(&r.symbols)))
}
+func (r *rwSymbolTable) LabelsData() []byte {
+ return r.symbols
+}
+
func (r *rwSymbolTable) clear() {
for k := range r.symbolsMap {
delete(r.symbolsMap, k)
}
+ for k := range r.symbolsMapBytes {
+ delete(r.symbolsMapBytes, k)
+ }
r.symbols = r.symbols[:0]
}
-func buildMinimizedWriteRequest(samples []prompb.MinimizedTimeSeries, labels string, pBuf *[]byte, buf *[]byte) ([]byte, int64, error) {
+func buildMinimizedWriteRequest(samples []prompb.MinimizedTimeSeries, labels string, pBuf, buf *[]byte) ([]byte, int64, error) {
var highest int64
for _, ts := range samples {
// At the moment we only ever append a TimeSeries with a single sample or exemplar in it.
@@ -1811,7 +1915,53 @@ func buildMinimizedWriteRequest(samples []prompb.MinimizedTimeSeries, labels str
}
compressed := snappy.Encode(*buf, data)
- if n := snappy.MaxEncodedLen(len(data)); buf != nil && n > len(*buf) {
+ if n := snappy.MaxEncodedLen(len(data)); n > len(*buf) {
+ // grow the buffer for the next time
+ *buf = make([]byte, n)
+ }
+ return compressed, highest, nil
+}
+
+func buildMinimizedWriteRequestLen(samples []prompb.MinimizedTimeSeriesLen, labels []byte, pBuf *proto.Buffer, buf *[]byte) ([]byte, int64, error) {
+ var highest int64
+ for _, ts := range samples {
+ // At the moment we only ever append a TimeSeries with a single sample, exemplar, or histogram in it.
+ if len(ts.Samples) > 0 && ts.Samples[0].Timestamp > highest {
+ highest = ts.Samples[0].Timestamp
+ }
+ if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp > highest {
+ highest = ts.Exemplars[0].Timestamp
+ }
+ if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp > highest {
+ highest = ts.Histograms[0].Timestamp
+ }
+ }
+
+ req := &prompb.MinimizedWriteRequestLen{
+ Symbols: labels,
+ Timeseries: samples,
+ }
+
+ if pBuf == nil {
+ pBuf = proto.NewBuffer(nil) // For convenience in tests. Not efficient.
+ } else {
+ pBuf.Reset()
+ }
+ err := pBuf.Marshal(req)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ // snappy uses len() to see if it needs to allocate a new slice. Make the
+ // buffer as long as possible.
+ if buf != nil {
+ *buf = (*buf)[0:cap(*buf)]
+ } else {
+ buf = &[]byte{}
+ }
+
+ compressed := snappy.Encode(*buf, pBuf.Bytes())
+ if n := snappy.MaxEncodedLen(len(pBuf.Bytes())); n > len(*buf) {
// grow the buffer for the next time
*buf = make([]byte, n)
}
diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go
index 593fea9295..dbd53a69d2 100644
--- a/storage/remote/queue_manager_test.go
+++ b/storage/remote/queue_manager_test.go
@@ -67,7 +67,7 @@ func TestSampleDelivery(t *testing.T) {
exemplars bool
histograms bool
floatHistograms bool
- remoteWrite11 bool
+ rwFormat RemoteWriteFormat
}{
{samples: true, exemplars: false, histograms: false, floatHistograms: false, name: "samples only"},
{samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "samples, exemplars, and histograms"},
@@ -75,11 +75,11 @@ func TestSampleDelivery(t *testing.T) {
{samples: false, exemplars: false, histograms: true, floatHistograms: false, name: "histograms only"},
{samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "float histograms only"},
- {remoteWrite11: true, samples: true, exemplars: false, histograms: false, name: "interned samples only"},
- {remoteWrite11: true, samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "interned samples, exemplars, and histograms"},
- {remoteWrite11: true, samples: false, exemplars: true, histograms: false, name: "interned exemplars only"},
- {remoteWrite11: true, samples: false, exemplars: false, histograms: true, name: "interned histograms only"},
- {remoteWrite11: true, samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "interned float histograms only"},
+ {rwFormat: Min32Optimized, samples: true, exemplars: false, histograms: false, name: "interned samples only"},
+ {rwFormat: Min32Optimized, samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "interned samples, exemplars, and histograms"},
+ {rwFormat: Min32Optimized, samples: false, exemplars: true, histograms: false, name: "interned exemplars only"},
+ {rwFormat: Min32Optimized, samples: false, exemplars: false, histograms: true, name: "interned histograms only"},
+ {rwFormat: Min32Optimized, samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "interned float histograms only"},
}
// Let's create an even number of send batches so we don't run into the
@@ -106,7 +106,7 @@ func TestSampleDelivery(t *testing.T) {
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
dir := t.TempDir()
- s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, tc.remoteWrite11)
+ s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, tc.rwFormat)
defer s.Close()
var (
@@ -139,7 +139,7 @@ func TestSampleDelivery(t *testing.T) {
require.NoError(t, err)
qm := s.rws.queues[hash]
- c := NewTestWriteClient(tc.remoteWrite11)
+ c := NewTestWriteClient(tc.rwFormat)
qm.SetClient(c)
qm.StoreSeries(series, 0)
@@ -170,7 +170,7 @@ func TestSampleDelivery(t *testing.T) {
}
func TestMetadataDelivery(t *testing.T) {
- c := NewTestWriteClient(false)
+ c := NewTestWriteClient(Base1)
dir := t.TempDir()
@@ -178,7 +178,7 @@ func TestMetadataDelivery(t *testing.T) {
mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "")
- m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false)
+ m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, 0)
m.Start()
defer m.Stop()
@@ -204,13 +204,12 @@ func TestMetadataDelivery(t *testing.T) {
}
func TestSampleDeliveryTimeout(t *testing.T) {
- for _, proto := range []string{"1.1", "1.0"} {
- t.Run(proto, func(t *testing.T) {
- remoteWrite11 := proto == "1.1"
+ for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized} {
+ t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
// Let's send one less sample than batch size, and wait the timeout duration
n := 9
samples, series := createTimeseries(n, n)
- c := NewTestWriteClient(remoteWrite11)
+ c := NewTestWriteClient(rwFormat)
cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig
@@ -220,7 +219,7 @@ func TestSampleDeliveryTimeout(t *testing.T) {
dir := t.TempDir()
metrics := newQueueManagerMetrics(nil, "", "")
- m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11)
+ m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.StoreSeries(series, 0)
m.Start()
defer m.Stop()
@@ -238,9 +237,8 @@ func TestSampleDeliveryTimeout(t *testing.T) {
}
func TestSampleDeliveryOrder(t *testing.T) {
- for _, proto := range []string{"1.1", "1.0"} {
- t.Run(proto, func(t *testing.T) {
- remoteWrite11 := proto == "1.1"
+ for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized} {
+ t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
ts := 10
n := config.DefaultQueueConfig.MaxSamplesPerSend * ts
samples := make([]record.RefSample, 0, n)
@@ -258,7 +256,7 @@ func TestSampleDeliveryOrder(t *testing.T) {
})
}
- c := NewTestWriteClient(remoteWrite11)
+ c := NewTestWriteClient(rwFormat)
c.expectSamples(samples, series)
dir := t.TempDir()
@@ -267,7 +265,7 @@ func TestSampleDeliveryOrder(t *testing.T) {
mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "")
- m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11)
+ m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.StoreSeries(series, 0)
m.Start()
@@ -289,7 +287,7 @@ func TestShutdown(t *testing.T) {
mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "")
- m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, false)
+ m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
n := 2 * config.DefaultQueueConfig.MaxSamplesPerSend
samples, series := createTimeseries(n, n)
m.StoreSeries(series, 0)
@@ -327,7 +325,7 @@ func TestSeriesReset(t *testing.T) {
cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "")
- m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, false)
+ m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
for i := 0; i < numSegments; i++ {
series := []record.RefSeries{}
for j := 0; j < numSeries; j++ {
@@ -341,15 +339,14 @@ func TestSeriesReset(t *testing.T) {
}
func TestReshard(t *testing.T) {
- for _, proto := range []string{"1.1", "1.0"} {
- t.Run(proto, func(t *testing.T) {
- remoteWrite11 := proto == "1.1"
+ for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized} {
+ t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
size := 10 // Make bigger to find more races.
nSeries := 6
nSamples := config.DefaultQueueConfig.Capacity * size
samples, series := createTimeseries(nSamples, nSeries)
- c := NewTestWriteClient(remoteWrite11)
+ c := NewTestWriteClient(rwFormat)
c.expectSamples(samples, series)
cfg := config.DefaultQueueConfig
@@ -359,7 +356,7 @@ func TestReshard(t *testing.T) {
dir := t.TempDir()
metrics := newQueueManagerMetrics(nil, "", "")
- m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11)
+ m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.StoreSeries(series, 0)
m.Start()
@@ -385,10 +382,9 @@ func TestReshard(t *testing.T) {
}
func TestReshardRaceWithStop(t *testing.T) {
- for _, proto := range []string{"1.1", "1.0"} {
- t.Run(proto, func(t *testing.T) {
- remoteWrite11 := proto == "1.1"
- c := NewTestWriteClient(remoteWrite11)
+ for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized} {
+ t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
+ c := NewTestWriteClient(rwFormat)
var m *QueueManager
h := sync.Mutex{}
@@ -400,7 +396,7 @@ func TestReshardRaceWithStop(t *testing.T) {
go func() {
for {
metrics := newQueueManagerMetrics(nil, "", "")
- m = NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11)
+ m = NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.Start()
h.Unlock()
h.Lock()
@@ -425,9 +421,8 @@ func TestReshardRaceWithStop(t *testing.T) {
}
func TestReshardPartialBatch(t *testing.T) {
- for _, proto := range []string{"1.1", "1.0"} {
- t.Run(proto, func(t *testing.T) {
- remoteWrite11 := proto == "1.1"
+ for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized} {
+ t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
samples, series := createTimeseries(1, 10)
c := NewTestBlockedWriteClient()
@@ -440,7 +435,7 @@ func TestReshardPartialBatch(t *testing.T) {
cfg.BatchSendDeadline = model.Duration(batchSendDeadline)
metrics := newQueueManagerMetrics(nil, "", "")
- m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11)
+ m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.StoreSeries(series, 0)
m.Start()
@@ -472,9 +467,8 @@ func TestReshardPartialBatch(t *testing.T) {
// where a large scrape (> capacity + max samples per send) is appended at the
// same time as a batch times out according to the batch send deadline.
func TestQueueFilledDeadlock(t *testing.T) {
- for _, proto := range []string{"1.1", "1.0"} {
- t.Run(proto, func(t *testing.T) {
- remoteWrite11 := proto == "1.1"
+ for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized} {
+ t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
samples, series := createTimeseries(50, 1)
c := NewNopWriteClient()
@@ -490,7 +484,7 @@ func TestQueueFilledDeadlock(t *testing.T) {
metrics := newQueueManagerMetrics(nil, "", "")
- m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11)
+ m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.StoreSeries(series, 0)
m.Start()
defer m.Stop()
@@ -515,14 +509,13 @@ func TestQueueFilledDeadlock(t *testing.T) {
}
func TestReleaseNoninternedString(t *testing.T) {
- for _, proto := range []string{"1.1", "1.0"} {
- t.Run(proto, func(t *testing.T) {
- remoteWrite11 := proto == "1.1"
+ for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized} {
+ t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "")
- c := NewTestWriteClient(remoteWrite11)
- m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11)
+ c := NewTestWriteClient(rwFormat)
+ m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.Start()
defer m.Stop()
@@ -570,8 +563,9 @@ func TestShouldReshard(t *testing.T) {
mcfg := config.DefaultMetadataConfig
for _, c := range cases {
metrics := newQueueManagerMetrics(nil, "", "")
- client := NewTestWriteClient(false)
- m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, client, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false)
+ // todo: test with new proto type(s)
+ client := NewTestWriteClient(Base1)
+ m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, client, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
m.numShards = c.startingShards
m.dataIn.incr(c.samplesIn)
m.dataOut.incr(c.samplesOut)
@@ -706,16 +700,16 @@ type TestWriteClient struct {
wg sync.WaitGroup
mtx sync.Mutex
buf []byte
- expectRemoteWrite11 bool
+ rwFormat RemoteWriteFormat
}
-func NewTestWriteClient(expectRemoteWrite11 bool) *TestWriteClient {
+func NewTestWriteClient(rwFormat RemoteWriteFormat) *TestWriteClient {
return &TestWriteClient{
- withWaitGroup: true,
- receivedSamples: map[string][]prompb.Sample{},
- expectedSamples: map[string][]prompb.Sample{},
- receivedMetadata: map[string][]prompb.MetricMetadata{},
- expectRemoteWrite11: expectRemoteWrite11,
+ withWaitGroup: true,
+ receivedSamples: map[string][]prompb.Sample{},
+ expectedSamples: map[string][]prompb.Sample{},
+ receivedMetadata: map[string][]prompb.MetricMetadata{},
+ rwFormat: rwFormat,
}
}
@@ -803,6 +797,7 @@ func (c *TestWriteClient) waitForExpectedData(tb testing.TB) {
c.mtx.Lock()
defer c.mtx.Unlock()
+
for ts, expectedSamples := range c.expectedSamples {
require.Equal(tb, expectedSamples, c.receivedSamples[ts], ts)
}
@@ -831,25 +826,27 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
}
var reqProto *prompb.WriteRequest
- if c.expectRemoteWrite11 {
- var reqReduced prompb.MinimizedWriteRequest
- err = proto.Unmarshal(reqBuf, &reqReduced)
- if err == nil {
- reqProto, err = MinimizedWriteRequestToWriteRequest(&reqReduced)
- }
- } else {
+ switch c.rwFormat {
+ case Base1:
reqProto = &prompb.WriteRequest{}
err = proto.Unmarshal(reqBuf, reqProto)
+ case Min32Optimized:
+ var reqMin prompb.MinimizedWriteRequest
+ err = proto.Unmarshal(reqBuf, &reqMin)
+ if err == nil {
+ reqProto, err = MinimizedWriteRequestToWriteRequest(&reqMin)
+ }
}
if err != nil {
+ fmt.Println("error: ", err) // TODO: leftover debug print — remove or route through testing.TB before merging
return err
}
count := 0
for _, ts := range reqProto.Timeseries {
- labels := labelProtosToLabels(ts.Labels)
- seriesName := labels.Get("__name__")
+ ls := labelProtosToLabels(ts.Labels)
+ seriesName := ls.Get("__name__")
for _, sample := range ts.Samples {
count++
c.receivedSamples[seriesName] = append(c.receivedSamples[seriesName], sample)
@@ -860,12 +857,12 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
c.receivedExemplars[seriesName] = append(c.receivedExemplars[seriesName], ex)
}
- for _, histogram := range ts.Histograms {
+ for _, hist := range ts.Histograms {
count++
- if histogram.IsFloatHistogram() {
- c.receivedFloatHistograms[seriesName] = append(c.receivedFloatHistograms[seriesName], histogram)
+ if hist.IsFloatHistogram() {
+ c.receivedFloatHistograms[seriesName] = append(c.receivedFloatHistograms[seriesName], hist)
} else {
- c.receivedHistograms[seriesName] = append(c.receivedHistograms[seriesName], histogram)
+ c.receivedHistograms[seriesName] = append(c.receivedHistograms[seriesName], hist)
}
}
@@ -965,7 +962,8 @@ func BenchmarkSampleSend(b *testing.B) {
dir := b.TempDir()
metrics := newQueueManagerMetrics(nil, "", "")
- m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false)
+ // TODO: test with new proto type(s)
+ m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
m.StoreSeries(series, 0)
// These should be received by the client.
@@ -1009,9 +1007,10 @@ func BenchmarkStartup(b *testing.B) {
for n := 0; n < b.N; n++ {
metrics := newQueueManagerMetrics(nil, "", "")
c := NewTestBlockedWriteClient()
+ // TODO: test with new proto type(s)
m := NewQueueManager(metrics, nil, nil, logger, dir,
newEWMARate(ewmaWeight, shardUpdateDuration),
- cfg, mcfg, labels.EmptyLabels(), nil, c, 1*time.Minute, newPool(), newHighestTimestampMetric(), nil, false, false, false)
+ cfg, mcfg, labels.EmptyLabels(), nil, c, 1*time.Minute, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
m.watcher.SetStartTime(timestamp.Time(math.MaxInt64))
m.watcher.MaxSegment = segments[len(segments)-2]
err := m.watcher.Run()
@@ -1094,7 +1093,8 @@ func TestCalculateDesiredShards(t *testing.T) {
metrics := newQueueManagerMetrics(nil, "", "")
samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration)
- m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false)
+ // TODO: test with new proto type(s)
+ m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
// Need to start the queue manager so the proper metrics are initialized.
// However we can stop it right away since we don't need to do any actual
@@ -1163,7 +1163,7 @@ func TestCalculateDesiredShards(t *testing.T) {
}
func TestCalculateDesiredShardsDetail(t *testing.T) {
- c := NewTestWriteClient(false)
+ c := NewTestWriteClient(Base1)
cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig
@@ -1171,7 +1171,8 @@ func TestCalculateDesiredShardsDetail(t *testing.T) {
metrics := newQueueManagerMetrics(nil, "", "")
samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration)
- m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false)
+ // TODO: test with new proto type(s)
+ m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
for _, tc := range []struct {
name string
@@ -1432,9 +1433,9 @@ func createDummyTimeSeries(instances int) []timeSeries {
b := labels.NewBuilder(commonLabels)
b.Set("pod", "prometheus-"+strconv.Itoa(i))
for _, lbls := range metrics {
- for _, l := range lbls {
+ lbls.Range(func(l labels.Label) {
b.Set(l.Name, l.Value)
- }
+ })
result = append(result, timeSeries{
seriesLabels: b.Labels(),
value: r.Float64(),
@@ -1495,9 +1496,9 @@ func BenchmarkBuildMinimizedWriteRequest(b *testing.B) {
batch []timeSeries
}
testCases := []testcase{
- testcase{createDummyTimeSeries(2)},
- testcase{createDummyTimeSeries(10)},
- testcase{createDummyTimeSeries(100)},
+ {createDummyTimeSeries(2)},
+ {createDummyTimeSeries(10)},
+ {createDummyTimeSeries(100)},
}
for _, tc := range testCases {
symbolTable := newRwSymbolTable()
diff --git a/storage/remote/read_test.go b/storage/remote/read_test.go
index 4868175fb3..5897642d6b 100644
--- a/storage/remote/read_test.go
+++ b/storage/remote/read_test.go
@@ -91,7 +91,8 @@ func TestNoDuplicateReadConfigs(t *testing.T) {
for _, tc := range cases {
t.Run("", func(t *testing.T) {
- s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
+ // TODO: test with new format type(s)?
+ s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
RemoteReadConfigs: tc.cfgs,
diff --git a/storage/remote/storage.go b/storage/remote/storage.go
index 05a107218c..63abe30ba9 100644
--- a/storage/remote/storage.go
+++ b/storage/remote/storage.go
@@ -62,7 +62,7 @@ type Storage struct {
}
// NewStorage returns a remote.Storage.
-func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, remoteWrite11 bool) *Storage {
+func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, rwFormat RemoteWriteFormat) *Storage {
if l == nil {
l = log.NewNopLogger()
}
@@ -72,7 +72,7 @@ func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCal
logger: logger,
localStartTimeCallback: stCallback,
}
- s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, remoteWrite11)
+ s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, rwFormat)
return s
}
diff --git a/storage/remote/storage_test.go b/storage/remote/storage_test.go
index cad6fb338f..59826f0128 100644
--- a/storage/remote/storage_test.go
+++ b/storage/remote/storage_test.go
@@ -27,7 +27,8 @@ import (
func TestStorageLifecycle(t *testing.T) {
dir := t.TempDir()
- s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
+ // TODO: test with new format type(s)
+ s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: []*config.RemoteWriteConfig{
@@ -54,7 +55,8 @@ func TestStorageLifecycle(t *testing.T) {
func TestUpdateRemoteReadConfigs(t *testing.T) {
dir := t.TempDir()
- s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
+ // TODO: test with new format type(s)
+ s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{
GlobalConfig: config.GlobalConfig{},
@@ -75,7 +77,8 @@ func TestUpdateRemoteReadConfigs(t *testing.T) {
func TestFilterExternalLabels(t *testing.T) {
dir := t.TempDir()
- s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
+ // TODO: test with new format type(s)
+ s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{
GlobalConfig: config.GlobalConfig{
@@ -100,7 +103,8 @@ func TestFilterExternalLabels(t *testing.T) {
func TestIgnoreExternalLabels(t *testing.T) {
dir := t.TempDir()
- s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
+ // TODO: test with new format type(s)
+ s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{
GlobalConfig: config.GlobalConfig{
diff --git a/storage/remote/write.go b/storage/remote/write.go
index a9239504bc..733ea6fdfb 100644
--- a/storage/remote/write.go
+++ b/storage/remote/write.go
@@ -65,7 +65,7 @@ type WriteStorage struct {
externalLabels labels.Labels
dir string
queues map[string]*QueueManager
- remoteWrite11 bool
+ rwFormat RemoteWriteFormat
samplesIn *ewmaRate
flushDeadline time.Duration
interner *pool
@@ -77,13 +77,13 @@ type WriteStorage struct {
}
// NewWriteStorage creates and runs a WriteStorage.
-func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, remoteWrite11 bool) *WriteStorage {
+func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, rwFormat RemoteWriteFormat) *WriteStorage {
if logger == nil {
logger = log.NewNopLogger()
}
rws := &WriteStorage{
queues: make(map[string]*QueueManager),
- remoteWrite11: remoteWrite11,
+ rwFormat: rwFormat,
watcherMetrics: wlog.NewWatcherMetrics(reg),
liveReaderMetrics: wlog.NewLiveReaderMetrics(reg),
logger: logger,
@@ -156,14 +156,14 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
}
c, err := NewWriteClient(name, &ClientConfig{
- URL: rwConf.URL,
- RemoteWrite11: rws.remoteWrite11,
- Timeout: rwConf.RemoteTimeout,
- HTTPClientConfig: rwConf.HTTPClientConfig,
- SigV4Config: rwConf.SigV4Config,
- AzureADConfig: rwConf.AzureADConfig,
- Headers: rwConf.Headers,
- RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit,
+ URL: rwConf.URL,
+ RemoteWriteFormat: rws.rwFormat,
+ Timeout: rwConf.RemoteTimeout,
+ HTTPClientConfig: rwConf.HTTPClientConfig,
+ SigV4Config: rwConf.SigV4Config,
+ AzureADConfig: rwConf.AzureADConfig,
+ Headers: rwConf.Headers,
+ RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit,
})
if err != nil {
return err
@@ -200,7 +200,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
rws.scraper,
rwConf.SendExemplars,
rwConf.SendNativeHistograms,
- rws.remoteWrite11,
+ rws.rwFormat,
)
// Keep track of which queues are new so we know which to start.
newHashes = append(newHashes, hash)
diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go
index 9ab69a54dc..09ad8422c9 100644
--- a/storage/remote/write_handler.go
+++ b/storage/remote/write_handler.go
@@ -46,17 +46,17 @@ type writeHandler struct {
// Experimental feature, new remote write proto format
// The handler will accept the new format, but it can still accept the old one
- enableRemoteWrite11 bool
+ // TODO: this should eventually be via content negotiation
+ rwFormat RemoteWriteFormat
}
// NewWriteHandler creates a http.Handler that accepts remote write requests and
// writes them to the provided appendable.
-func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, enableRemoteWrite11 bool) http.Handler {
+func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, rwFormat RemoteWriteFormat) http.Handler {
h := &writeHandler{
- logger: logger,
- appendable: appendable,
- enableRemoteWrite11: enableRemoteWrite11,
-
+ logger: logger,
+ appendable: appendable,
+ rwFormat: rwFormat,
samplesWithInvalidLabelsTotal: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "prometheus",
Subsystem: "api",
@@ -74,11 +74,16 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var err error
var req *prompb.WriteRequest
var reqMin *prompb.MinimizedWriteRequest
+ var reqMinLen *prompb.MinimizedWriteRequestLen
- if h.enableRemoteWrite11 && r.Header.Get(RemoteWriteVersionHeader) == RemoteWriteVersion11HeaderValue {
- reqMin, err = DecodeMinimizedWriteRequest(r.Body)
- } else {
+ // TODO: this should eventually be done via content negotiation/looking at the header
+ switch h.rwFormat {
+ case Base1:
req, err = DecodeWriteRequest(r.Body)
+ case Min32Optimized:
+ reqMin, err = DecodeMinimizedWriteRequest(r.Body)
+ case MinLen: // NOTE(review): no default branch — an unrecognized rwFormat decodes nothing and the handler reports success; consider returning 400.
+ reqMinLen, err = DecodeMinimizedWriteRequestLen(r.Body)
}
if err != nil {
@@ -87,11 +92,16 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
- if h.enableRemoteWrite11 && r.Header.Get(RemoteWriteVersionHeader) == RemoteWriteVersion11HeaderValue {
- err = h.writeMin(r.Context(), reqMin)
- } else {
+ // TODO: this should eventually be done detecting the format version above
+ switch h.rwFormat {
+ case Base1:
err = h.write(r.Context(), req)
+ case Min32Optimized:
+ err = h.writeMin(r.Context(), reqMin)
+ case MinLen:
+ err = h.writeMinLen(r.Context(), reqMinLen)
}
+
switch err {
case nil:
case storage.ErrOutOfOrderSample, storage.ErrOutOfBounds, storage.ErrDuplicateSampleForTimestamp:
@@ -320,3 +330,41 @@ func (h *writeHandler) writeMin(ctx context.Context, req *prompb.MinimizedWriteR
return nil
}
+
+func (h *writeHandler) writeMinLen(ctx context.Context, req *prompb.MinimizedWriteRequestLen) (err error) {
+ outOfOrderExemplarErrs := 0
+
+ app := h.appendable.Appender(ctx)
+ defer func() {
+ if err != nil {
+ _ = app.Rollback()
+ return
+ }
+ err = app.Commit()
+ }()
+
+ for _, ts := range req.Timeseries {
+ ls := Uint32LenRefToLabels(req.Symbols, ts.LabelSymbols)
+
+ err := h.appendSamples(app, ts.Samples, ls)
+ if err != nil {
+ return err
+ }
+
+ for _, ep := range ts.Exemplars {
+ e := exemplarProtoToExemplar(ep)
+ h.appendExemplar(app, e, ls, &outOfOrderExemplarErrs)
+ }
+
+ err = h.appendHistograms(app, ts.Histograms, ls)
+ if err != nil {
+ return err
+ }
+ }
+
+ if outOfOrderExemplarErrs > 0 {
+ _ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs)
+ }
+
+ return nil
+}
diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go
index 58abc23734..9f8068e9c0 100644
--- a/storage/remote/write_handler_test.go
+++ b/storage/remote/write_handler_test.go
@@ -45,7 +45,8 @@ func TestRemoteWriteHandler(t *testing.T) {
require.NoError(t, err)
appendable := &mockAppendable{}
- handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false)
+ // TODO: test with other proto format(s)
+ handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@@ -57,25 +58,25 @@ func TestRemoteWriteHandler(t *testing.T) {
j := 0
k := 0
for _, ts := range writeRequestFixture.Timeseries {
- labels := labelProtosToLabels(ts.Labels)
+ ls := labelProtosToLabels(ts.Labels)
for _, s := range ts.Samples {
- require.Equal(t, mockSample{labels, s.Timestamp, s.Value}, appendable.samples[i])
+ require.Equal(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i])
i++
}
for _, e := range ts.Exemplars {
exemplarLabels := labelProtosToLabels(e.Labels)
- require.Equal(t, mockExemplar{labels, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
+ require.Equal(t, mockExemplar{ls, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
j++
}
for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() {
fh := FloatHistogramProtoToFloatHistogram(hp)
- require.Equal(t, mockHistogram{labels, hp.Timestamp, nil, fh}, appendable.histograms[k])
+ require.Equal(t, mockHistogram{ls, hp.Timestamp, nil, fh}, appendable.histograms[k])
} else {
h := HistogramProtoToHistogram(hp)
- require.Equal(t, mockHistogram{labels, hp.Timestamp, h, nil}, appendable.histograms[k])
+ require.Equal(t, mockHistogram{ls, hp.Timestamp, h, nil}, appendable.histograms[k])
}
k++
@@ -92,7 +93,8 @@ func TestRemoteWriteHandlerMinimizedFormat(t *testing.T) {
require.NoError(t, err)
appendable := &mockAppendable{}
- handler := NewWriteHandler(nil, nil, appendable, true)
+ // TODO: test with other proto format(s)
+ handler := NewWriteHandler(nil, nil, appendable, Min32Optimized)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@@ -145,7 +147,8 @@ func TestOutOfOrderSample(t *testing.T) {
appendable := &mockAppendable{
latestSample: 100,
}
- handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false)
+ // TODO: test with other proto format(s)
+ handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@@ -170,7 +173,8 @@ func TestOutOfOrderExemplar(t *testing.T) {
appendable := &mockAppendable{
latestExemplar: 100,
}
- handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false)
+ // TODO: test with other proto format(s)
+ handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@@ -193,8 +197,8 @@ func TestOutOfOrderHistogram(t *testing.T) {
appendable := &mockAppendable{
latestHistogram: 100,
}
-
- handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false)
+ // TODO: test with other proto format(s)
+ handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@@ -205,7 +209,7 @@ func TestOutOfOrderHistogram(t *testing.T) {
func BenchmarkRemoteWritehandler(b *testing.B) {
const labelValue = "abcdefg'hijlmn234!@#$%^&*()_+~`\"{}[],./<>?hello0123hiOlá你好Dzieńdobry9Zd8ra765v4stvuyte"
- reqs := []*http.Request{}
+ var reqs []*http.Request
for i := 0; i < b.N; i++ {
num := strings.Repeat(strconv.Itoa(i), 16)
buf, _, err := buildWriteRequest([]prompb.TimeSeries{{
@@ -222,7 +226,8 @@ func BenchmarkRemoteWritehandler(b *testing.B) {
}
appendable := &mockAppendable{}
- handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false)
+ // TODO: test with other proto format(s)
+ handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder()
b.ResetTimer()
@@ -231,37 +236,6 @@ func BenchmarkRemoteWritehandler(b *testing.B) {
}
}
-// TODO(npazosmendez): adapt to minimized version
-// func BenchmarkReducedRemoteWriteHandler(b *testing.B) {
-// const labelValue = "abcdefg'hijlmn234!@#$%^&*()_+~`\"{}[],./<>?hello0123hiOlá你好Dzieńdobry9Zd8ra765v4stvuyte"
-// reqs := []*http.Request{}
-// for i := 0; i < b.N; i++ {
-// pool := newLookupPool()
-// num := strings.Repeat(strconv.Itoa(i), 16)
-// buf, _, err := buildReducedWriteRequest([]prompb.ReducedTimeSeries{{
-// Labels: []prompb.LabelRef{
-// {NameRef: pool.intern("__name__"), ValueRef: pool.intern("test_metric")},
-// {NameRef: pool.intern("test_label_name_" + num), ValueRef: pool.intern(labelValue + num)},
-// },
-// Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram)},
-// }}, pool.getTable(), nil, nil)
-// require.NoError(b, err)
-// req, err := http.NewRequest("", "", bytes.NewReader(buf))
-// require.NoError(b, err)
-// req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion11HeaderValue)
-// reqs = append(reqs, req)
-// }
-
-// appendable := &mockAppendable{}
-// handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, true, false)
-// recorder := httptest.NewRecorder()
-
-// b.ResetTimer()
-// for _, req := range reqs {
-// handler.ServeHTTP(recorder, req)
-// }
-// }
-
func TestCommitErr(t *testing.T) {
buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil, nil)
require.NoError(t, err)
@@ -272,7 +246,8 @@ func TestCommitErr(t *testing.T) {
appendable := &mockAppendable{
commitErr: fmt.Errorf("commit error"),
}
- handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false)
+ // TODO: test with other proto format(s)
+ handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@@ -297,8 +272,8 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) {
b.Cleanup(func() {
require.NoError(b, db.Close())
})
-
- handler := NewWriteHandler(log.NewNopLogger(), nil, db.Head(), false)
+ // TODO: test with other proto format(s)
+ handler := NewWriteHandler(log.NewNopLogger(), nil, db.Head(), Base1)
buf, _, err := buildWriteRequest(genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil)
require.NoError(b, err)
diff --git a/storage/remote/write_test.go b/storage/remote/write_test.go
index ec5730f855..34cd9b90a5 100644
--- a/storage/remote/write_test.go
+++ b/storage/remote/write_test.go
@@ -117,7 +117,8 @@ func TestNoDuplicateWriteConfigs(t *testing.T) {
}
for _, tc := range cases {
- s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false)
+ // TODO: test with new format type(s)
+ s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, Base1)
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: tc.cfgs,
@@ -139,7 +140,8 @@ func TestRestartOnNameChange(t *testing.T) {
hash, err := toHash(cfg)
require.NoError(t, err)
- s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false)
+ // TODO: test with new format type(s)
+ s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, Base1)
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
@@ -164,7 +166,8 @@ func TestRestartOnNameChange(t *testing.T) {
func TestUpdateWithRegisterer(t *testing.T) {
dir := t.TempDir()
- s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Millisecond, nil, false)
+ // TODO: test with new format type(s)
+ s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Millisecond, nil, Base1)
c1 := &config.RemoteWriteConfig{
Name: "named",
URL: &common_config.URL{
@@ -204,7 +207,8 @@ func TestUpdateWithRegisterer(t *testing.T) {
func TestWriteStorageLifecycle(t *testing.T) {
dir := t.TempDir()
- s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false)
+ // TODO: test with new format type(s)
+ s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: []*config.RemoteWriteConfig{
@@ -221,7 +225,8 @@ func TestWriteStorageLifecycle(t *testing.T) {
func TestUpdateExternalLabels(t *testing.T) {
dir := t.TempDir()
- s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Second, nil, false)
+ // TODO: test with new format type(s)
+ s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Second, nil, Base1)
externalLabels := labels.FromStrings("external", "true")
conf := &config.Config{
@@ -250,8 +255,8 @@ func TestUpdateExternalLabels(t *testing.T) {
func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
dir := t.TempDir()
- s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false)
-
+ // TODO: test with new format type(s)
+ s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{
GlobalConfig: config.GlobalConfig{},
RemoteWriteConfigs: []*config.RemoteWriteConfig{
@@ -276,7 +281,8 @@ func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
dir := t.TempDir()
- s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false)
+ // TODO: test with new format type(s)
+ s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, Base1)
c0 := &config.RemoteWriteConfig{
RemoteTimeout: model.Duration(10 * time.Second),
diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go
index f44760ea13..a4571d59af 100644
--- a/tsdb/agent/db_test.go
+++ b/tsdb/agent/db_test.go
@@ -88,7 +88,7 @@ func createTestAgentDB(t *testing.T, reg prometheus.Registerer, opts *Options) *
t.Helper()
dbDir := t.TempDir()
- rs := remote.NewStorage(log.NewNopLogger(), reg, startTime, dbDir, time.Second*30, nil, false)
+ rs := remote.NewStorage(log.NewNopLogger(), reg, startTime, dbDir, time.Second*30, nil, remote.Base1)
t.Cleanup(func() {
require.NoError(t, rs.Close())
})
@@ -584,7 +584,7 @@ func TestLockfile(t *testing.T) {
tsdbutil.TestDirLockerUsage(t, func(t *testing.T, data string, createLock bool) (*tsdbutil.DirLocker, testutil.Closer) {
logger := log.NewNopLogger()
reg := prometheus.NewRegistry()
- rs := remote.NewStorage(logger, reg, startTime, data, time.Second*30, nil, false)
+ rs := remote.NewStorage(logger, reg, startTime, data, time.Second*30, nil, remote.Base1)
t.Cleanup(func() {
require.NoError(t, rs.Close())
})
@@ -604,7 +604,7 @@ func TestLockfile(t *testing.T) {
func Test_ExistingWAL_NextRef(t *testing.T) {
dbDir := t.TempDir()
- rs := remote.NewStorage(log.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil, false)
+ rs := remote.NewStorage(log.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil, remote.Base1)
defer func() {
require.NoError(t, rs.Close())
}()
diff --git a/web/api/v1/api.go b/web/api/v1/api.go
index 6187aaf032..7594b65472 100644
--- a/web/api/v1/api.go
+++ b/web/api/v1/api.go
@@ -253,8 +253,8 @@ func NewAPI(
registerer prometheus.Registerer,
statsRenderer StatsRenderer,
rwEnabled bool,
+ rwFormat remote.RemoteWriteFormat,
otlpEnabled bool,
- enableRemoteWrite11 bool,
) *API {
a := &API{
QueryEngine: qe,
@@ -296,7 +296,7 @@ func NewAPI(
}
if rwEnabled {
- a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, enableRemoteWrite11)
+ a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, rwFormat)
}
if otlpEnabled {
a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap)
diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go
index 20b231c233..aa91e3b492 100644
--- a/web/api/v1/api_test.go
+++ b/web/api/v1/api_test.go
@@ -459,9 +459,10 @@ func TestEndpoints(t *testing.T) {
dbDir := t.TempDir()
+ // TODO: test with other proto format(s)?
remote := remote.NewStorage(promlog.New(&promlogConfig), prometheus.DefaultRegisterer, func() (int64, error) {
return 0, nil
- }, dbDir, 1*time.Second, nil, false)
+ }, dbDir, 1*time.Second, nil, remote.Base1)
err = remote.ApplyConfig(&config.Config{
RemoteReadConfigs: []*config.RemoteReadConfig{
diff --git a/web/api/v1/errors_test.go b/web/api/v1/errors_test.go
index b3b4d5a8e7..145e3d183b 100644
--- a/web/api/v1/errors_test.go
+++ b/web/api/v1/errors_test.go
@@ -36,6 +36,7 @@ import (
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/storage/remote"
"github.com/prometheus/prometheus/util/annotations"
)
@@ -136,7 +137,7 @@ func createPrometheusAPI(q storage.SampleAndChunkQueryable) *route.Router {
nil,
nil,
false,
- false,
+ remote.Base1,
false, // Disable experimental reduce remote write proto support.
)
diff --git a/web/web.go b/web/web.go
index 59049ba703..6430166f4a 100644
--- a/web/web.go
+++ b/web/web.go
@@ -58,6 +58,7 @@ import (
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/storage/remote"
"github.com/prometheus/prometheus/template"
"github.com/prometheus/prometheus/util/httputil"
api_v1 "github.com/prometheus/prometheus/web/api/v1"
@@ -242,27 +243,27 @@ type Options struct {
Version *PrometheusVersion
Flags map[string]string
- ListenAddress string
- CORSOrigin *regexp.Regexp
- ReadTimeout time.Duration
- MaxConnections int
- ExternalURL *url.URL
- RoutePrefix string
- UseLocalAssets bool
- UserAssetsPath string
- ConsoleTemplatesPath string
- ConsoleLibrariesPath string
- EnableLifecycle bool
- EnableAdminAPI bool
- PageTitle string
- RemoteReadSampleLimit int
- RemoteReadConcurrencyLimit int
- RemoteReadBytesInFrame int
- EnableRemoteWriteReceiver bool
- EnableOTLPWriteReceiver bool
- IsAgent bool
- AppName string
- EnableReceiverRemoteWrite11 bool
+ ListenAddress string
+ CORSOrigin *regexp.Regexp
+ ReadTimeout time.Duration
+ MaxConnections int
+ ExternalURL *url.URL
+ RoutePrefix string
+ UseLocalAssets bool
+ UserAssetsPath string
+ ConsoleTemplatesPath string
+ ConsoleLibrariesPath string
+ EnableLifecycle bool
+ EnableAdminAPI bool
+ PageTitle string
+ RemoteReadSampleLimit int
+ RemoteReadConcurrencyLimit int
+ RemoteReadBytesInFrame int
+ EnableRemoteWriteReceiver bool
+ EnableOTLPWriteReceiver bool
+ IsAgent bool
+ AppName string
+ RemoteWriteFormat remote.RemoteWriteFormat
Gatherer prometheus.Gatherer
Registerer prometheus.Registerer
@@ -352,8 +353,8 @@ func New(logger log.Logger, o *Options) *Handler {
o.Registerer,
nil,
o.EnableRemoteWriteReceiver,
+ o.RemoteWriteFormat,
o.EnableOTLPWriteReceiver,
- o.EnableReceiverRemoteWrite11,
)
if o.RoutePrefix != "/" {