refactor new version flag to make it easier to pick a specific format

instead of having multiple flags, plus add new formats for testing

Signed-off-by: Callum Styan <callumstyan@gmail.com>
Signed-off-by: Nicolás Pazos <npazosmendez@gmail.com>
This commit is contained in:
Callum Styan 2023-11-13 11:13:49 -08:00 committed by Nicolás Pazos
parent 8353c337b4
commit 23c7936cee
28 changed files with 5203 additions and 19512 deletions

View file

@ -154,7 +154,8 @@ type flagConfig struct {
enableNewSDManager bool enableNewSDManager bool
enablePerStepStats bool enablePerStepStats bool
enableAutoGOMAXPROCS bool enableAutoGOMAXPROCS bool
enableSenderRemoteWrite11 bool // todo: how to use the enable feature flag properly + use the remote format enum type
rwFormat int
prometheusURL string prometheusURL string
corsRegexString string corsRegexString string
@ -220,11 +221,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
continue continue
case "promql-at-modifier", "promql-negative-offset": case "promql-at-modifier", "promql-negative-offset":
level.Warn(logger).Log("msg", "This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", o) level.Warn(logger).Log("msg", "This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", o)
case "rw-1-1-sender":
c.enableSenderRemoteWrite11 = true
level.Info(logger).Log("msg", "Experimental remote write 1.1 will be used on the sender end, receiver must be able to parse this new protobuf format.")
case "rw-1-1-receiver":
c.web.EnableReceiverRemoteWrite11 = true
default: default:
level.Warn(logger).Log("msg", "Unknown option for --enable-feature", "option", o) level.Warn(logger).Log("msg", "Unknown option for --enable-feature", "option", o)
} }
@ -438,6 +434,9 @@ func main() {
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
Default("").StringsVar(&cfg.featureList) Default("").StringsVar(&cfg.featureList)
a.Flag("remote-write-format", "remote write proto format to use, valid options: 0 (1.0), 1 (reduced format), 3 (min64 format)").
Default("0").IntVar(&cfg.rwFormat)
promlogflag.AddFlags(a, &cfg.promlogConfig) promlogflag.AddFlags(a, &cfg.promlogConfig)
a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error { a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error {
@ -610,7 +609,7 @@ func main() {
var ( var (
localStorage = &readyStorage{stats: tsdb.NewDBStats()} localStorage = &readyStorage{stats: tsdb.NewDBStats()}
scraper = &readyScrapeManager{} scraper = &readyScrapeManager{}
remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.enableSenderRemoteWrite11) remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, remote.RemoteWriteFormat(cfg.rwFormat))
fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage) fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage)
) )
@ -772,6 +771,7 @@ func main() {
cfg.web.Flags[f.Name] = f.Value.String() cfg.web.Flags[f.Name] = f.Value.String()
} }
cfg.web.RemoteWriteFormat = remote.RemoteWriteFormat(cfg.rwFormat)
// Depends on cfg.web.ScrapeManager so needs to be after cfg.web.ScrapeManager = scrapeManager. // Depends on cfg.web.ScrapeManager so needs to be after cfg.web.ScrapeManager = scrapeManager.
webHandler := web.New(log.With(logger, "component", "web"), &cfg.web) webHandler := web.New(log.With(logger, "component", "web"), &cfg.web)

View file

@ -53,6 +53,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
| <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | | <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
| <code class="text-nowrap">--enable-feature</code> | Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | | <code class="text-nowrap">--enable-feature</code> | Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--remote-write-format</code> | remote write proto format to use, valid options: 0 (1.0), 1 (reduced format), 3 (min64 format) | `0` |
| <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
| <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` | | <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |

View file

@ -57,38 +57,6 @@ func main() {
} }
}) })
http.HandleFunc("/receiveReduced", func(w http.ResponseWriter, r *http.Request) {
req, err := remote.DecodeReducedWriteRequest(r.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
for _, ts := range req.Timeseries {
m := make(model.Metric, len(ts.Labels))
for _, l := range ts.Labels {
m[model.LabelName(req.StringSymbolTable[l.NameRef])] = model.LabelValue(req.StringSymbolTable[l.ValueRef])
}
for _, s := range ts.Samples {
fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
}
for _, e := range ts.Exemplars {
m := make(model.Metric, len(e.Labels))
for _, l := range e.Labels {
m[model.LabelName(req.StringSymbolTable[l.NameRef])] = model.LabelValue(req.StringSymbolTable[l.ValueRef])
}
fmt.Printf("\tExemplar: %+v %f %d\n", m, e.Value, e.Timestamp)
}
for _, hp := range ts.Histograms {
h := remote.HistogramProtoToHistogram(hp)
fmt.Printf("\tHistogram: %s\n", h.String())
}
}
})
http.HandleFunc("/receiveMinimized", func(w http.ResponseWriter, r *http.Request) { http.HandleFunc("/receiveMinimized", func(w http.ResponseWriter, r *http.Request) {
req, err := remote.DecodeMinimizedWriteRequest(r.Body) req, err := remote.DecodeMinimizedWriteRequest(r.Body)
if err != nil { if err != nil {
@ -97,8 +65,25 @@ func main() {
} }
for _, ts := range req.Timeseries { for _, ts := range req.Timeseries {
ls := remote.Uint32RefToLabels(req.Symbols, ts.LabelSymbols) m := make(model.Metric, len(ts.LabelSymbols)/2)
fmt.Println(ls) labelIdx := 0
for labelIdx < len(ts.LabelSymbols) {
// todo, check for overflow?
offset := ts.LabelSymbols[labelIdx]
labelIdx++
length := ts.LabelSymbols[labelIdx]
labelIdx++
name := req.Symbols[offset : offset+length]
// todo, check for overflow?
offset = ts.LabelSymbols[labelIdx]
labelIdx++
length = ts.LabelSymbols[labelIdx]
labelIdx++
value := req.Symbols[offset : offset+length]
m[model.LabelName(name)] = model.LabelValue(value)
}
fmt.Println(m)
for _, s := range ts.Samples { for _, s := range ts.Samples {
fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp) fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)

View file

@ -60,7 +60,7 @@ func (x ReadRequest_ResponseType) String() string {
} }
func (ReadRequest_ResponseType) EnumDescriptor() ([]byte, []int) { func (ReadRequest_ResponseType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{2, 0} return fileDescriptor_eefc82927d57d89b, []int{5, 0}
} }
type WriteRequest struct { type WriteRequest struct {
@ -118,6 +118,63 @@ func (m *WriteRequest) GetMetadata() []MetricMetadata {
return nil return nil
} }
// MinimizedWriteRequestPacking is a protobuf message carrying remote-write
// time series in the experimental "packing" minimized format. The XXX_*
// fields, struct tags and Descriptor wiring indicate gogo/protobuf-generated
// code (from remote.proto) — regenerate from the .proto rather than editing
// by hand.
type MinimizedWriteRequestPacking struct {
	// Timeseries is the series payload (protobuf field 1, repeated).
	Timeseries []MinimizedTimeSeriesPacking `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
	// The symbols table. All symbols are concatenated strings. To read the symbols table, it's required
	// to know the offset:length range of the actual symbol to read from this string.
	Symbols              string   `protobuf:"bytes,4,opt,name=symbols,proto3" json:"symbols,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *MinimizedWriteRequestPacking) Reset() { *m = MinimizedWriteRequestPacking{} }

// String renders the message in compact protobuf text form.
func (m *MinimizedWriteRequestPacking) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a protobuf message.
func (*MinimizedWriteRequestPacking) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and this message's index
// path within it.
func (*MinimizedWriteRequestPacking) Descriptor() ([]byte, []int) {
	return fileDescriptor_eefc82927d57d89b, []int{1}
}

func (m *MinimizedWriteRequestPacking) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal uses the reflection-based marshaler when deterministic output is
// requested, otherwise the generated MarshalToSizedBuffer fast path, which
// fills b from the end and returns the used suffix.
func (m *MinimizedWriteRequestPacking) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_MinimizedWriteRequestPacking.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}

func (m *MinimizedWriteRequestPacking) XXX_Merge(src proto.Message) {
	xxx_messageInfo_MinimizedWriteRequestPacking.Merge(m, src)
}

func (m *MinimizedWriteRequestPacking) XXX_Size() int {
	return m.Size()
}

func (m *MinimizedWriteRequestPacking) XXX_DiscardUnknown() {
	xxx_messageInfo_MinimizedWriteRequestPacking.DiscardUnknown(m)
}

var xxx_messageInfo_MinimizedWriteRequestPacking proto.InternalMessageInfo

// GetTimeseries returns the series slice; safe to call on a nil receiver.
func (m *MinimizedWriteRequestPacking) GetTimeseries() []MinimizedTimeSeriesPacking {
	if m != nil {
		return m.Timeseries
	}
	return nil
}

// GetSymbols returns the concatenated symbols table; safe on a nil receiver.
func (m *MinimizedWriteRequestPacking) GetSymbols() string {
	if m != nil {
		return m.Symbols
	}
	return ""
}
type MinimizedWriteRequest struct { type MinimizedWriteRequest struct {
Timeseries []MinimizedTimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"` Timeseries []MinimizedTimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
// The symbols table. All symbols are concatenated strings. To read the symbols table, it's required // The symbols table. All symbols are concatenated strings. To read the symbols table, it's required
@ -132,7 +189,7 @@ func (m *MinimizedWriteRequest) Reset() { *m = MinimizedWriteRequest{} }
func (m *MinimizedWriteRequest) String() string { return proto.CompactTextString(m) } func (m *MinimizedWriteRequest) String() string { return proto.CompactTextString(m) }
func (*MinimizedWriteRequest) ProtoMessage() {} func (*MinimizedWriteRequest) ProtoMessage() {}
func (*MinimizedWriteRequest) Descriptor() ([]byte, []int) { func (*MinimizedWriteRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{1} return fileDescriptor_eefc82927d57d89b, []int{2}
} }
func (m *MinimizedWriteRequest) XXX_Unmarshal(b []byte) error { func (m *MinimizedWriteRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -175,6 +232,120 @@ func (m *MinimizedWriteRequest) GetSymbols() string {
return "" return ""
} }
// MinimizedWriteRequestFixed32 is a protobuf message carrying remote-write
// time series in the experimental "fixed32" minimized format. This is
// gogo/protobuf-generated code (from remote.proto) — regenerate from the
// .proto rather than editing by hand.
type MinimizedWriteRequestFixed32 struct {
	// Timeseries is the series payload (protobuf field 1, repeated).
	Timeseries []MinimizedTimeSeriesFixed32 `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
	// The symbols table. All symbols are concatenated strings. To read the symbols table, it's required
	// to know the offset:length range of the actual symbol to read from this string.
	Symbols              string   `protobuf:"bytes,4,opt,name=symbols,proto3" json:"symbols,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *MinimizedWriteRequestFixed32) Reset() { *m = MinimizedWriteRequestFixed32{} }

// String renders the message in compact protobuf text form.
func (m *MinimizedWriteRequestFixed32) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a protobuf message.
func (*MinimizedWriteRequestFixed32) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and this message's index
// path within it.
func (*MinimizedWriteRequestFixed32) Descriptor() ([]byte, []int) {
	return fileDescriptor_eefc82927d57d89b, []int{3}
}

func (m *MinimizedWriteRequestFixed32) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal uses the reflection-based marshaler when deterministic output is
// requested, otherwise the generated MarshalToSizedBuffer fast path, which
// fills b from the end and returns the used suffix.
func (m *MinimizedWriteRequestFixed32) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_MinimizedWriteRequestFixed32.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}

func (m *MinimizedWriteRequestFixed32) XXX_Merge(src proto.Message) {
	xxx_messageInfo_MinimizedWriteRequestFixed32.Merge(m, src)
}

func (m *MinimizedWriteRequestFixed32) XXX_Size() int {
	return m.Size()
}

func (m *MinimizedWriteRequestFixed32) XXX_DiscardUnknown() {
	xxx_messageInfo_MinimizedWriteRequestFixed32.DiscardUnknown(m)
}

var xxx_messageInfo_MinimizedWriteRequestFixed32 proto.InternalMessageInfo

// GetTimeseries returns the series slice; safe to call on a nil receiver.
func (m *MinimizedWriteRequestFixed32) GetTimeseries() []MinimizedTimeSeriesFixed32 {
	if m != nil {
		return m.Timeseries
	}
	return nil
}

// GetSymbols returns the concatenated symbols table; safe on a nil receiver.
func (m *MinimizedWriteRequestFixed32) GetSymbols() string {
	if m != nil {
		return m.Symbols
	}
	return ""
}
// MinimizedWriteRequestFixed64 is a protobuf message carrying remote-write
// time series in the experimental "fixed64" minimized format. This is
// gogo/protobuf-generated code (from remote.proto) — regenerate from the
// .proto rather than editing by hand.
type MinimizedWriteRequestFixed64 struct {
	// Timeseries is the series payload (protobuf field 1, repeated).
	Timeseries []MinimizedTimeSeriesFixed64 `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
	// The symbols table. All symbols are concatenated strings. To read the symbols table, it's required
	// to know the offset:length range of the actual symbol to read from this string.
	Symbols              string   `protobuf:"bytes,4,opt,name=symbols,proto3" json:"symbols,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *MinimizedWriteRequestFixed64) Reset() { *m = MinimizedWriteRequestFixed64{} }

// String renders the message in compact protobuf text form.
func (m *MinimizedWriteRequestFixed64) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a protobuf message.
func (*MinimizedWriteRequestFixed64) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and this message's index
// path within it.
func (*MinimizedWriteRequestFixed64) Descriptor() ([]byte, []int) {
	return fileDescriptor_eefc82927d57d89b, []int{4}
}

func (m *MinimizedWriteRequestFixed64) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}

// XXX_Marshal uses the reflection-based marshaler when deterministic output is
// requested, otherwise the generated MarshalToSizedBuffer fast path, which
// fills b from the end and returns the used suffix.
func (m *MinimizedWriteRequestFixed64) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_MinimizedWriteRequestFixed64.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}

func (m *MinimizedWriteRequestFixed64) XXX_Merge(src proto.Message) {
	xxx_messageInfo_MinimizedWriteRequestFixed64.Merge(m, src)
}

func (m *MinimizedWriteRequestFixed64) XXX_Size() int {
	return m.Size()
}

func (m *MinimizedWriteRequestFixed64) XXX_DiscardUnknown() {
	xxx_messageInfo_MinimizedWriteRequestFixed64.DiscardUnknown(m)
}

var xxx_messageInfo_MinimizedWriteRequestFixed64 proto.InternalMessageInfo

// GetTimeseries returns the series slice; safe to call on a nil receiver.
func (m *MinimizedWriteRequestFixed64) GetTimeseries() []MinimizedTimeSeriesFixed64 {
	if m != nil {
		return m.Timeseries
	}
	return nil
}

// GetSymbols returns the concatenated symbols table; safe on a nil receiver.
func (m *MinimizedWriteRequestFixed64) GetSymbols() string {
	if m != nil {
		return m.Symbols
	}
	return ""
}
// ReadRequest represents a remote read request. // ReadRequest represents a remote read request.
type ReadRequest struct { type ReadRequest struct {
Queries []*Query `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` Queries []*Query `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"`
@ -193,7 +364,7 @@ func (m *ReadRequest) Reset() { *m = ReadRequest{} }
func (m *ReadRequest) String() string { return proto.CompactTextString(m) } func (m *ReadRequest) String() string { return proto.CompactTextString(m) }
func (*ReadRequest) ProtoMessage() {} func (*ReadRequest) ProtoMessage() {}
func (*ReadRequest) Descriptor() ([]byte, []int) { func (*ReadRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{2} return fileDescriptor_eefc82927d57d89b, []int{5}
} }
func (m *ReadRequest) XXX_Unmarshal(b []byte) error { func (m *ReadRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -249,7 +420,7 @@ func (m *ReadResponse) Reset() { *m = ReadResponse{} }
func (m *ReadResponse) String() string { return proto.CompactTextString(m) } func (m *ReadResponse) String() string { return proto.CompactTextString(m) }
func (*ReadResponse) ProtoMessage() {} func (*ReadResponse) ProtoMessage() {}
func (*ReadResponse) Descriptor() ([]byte, []int) { func (*ReadResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{3} return fileDescriptor_eefc82927d57d89b, []int{6}
} }
func (m *ReadResponse) XXX_Unmarshal(b []byte) error { func (m *ReadResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -299,7 +470,7 @@ func (m *Query) Reset() { *m = Query{} }
func (m *Query) String() string { return proto.CompactTextString(m) } func (m *Query) String() string { return proto.CompactTextString(m) }
func (*Query) ProtoMessage() {} func (*Query) ProtoMessage() {}
func (*Query) Descriptor() ([]byte, []int) { func (*Query) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{4} return fileDescriptor_eefc82927d57d89b, []int{7}
} }
func (m *Query) XXX_Unmarshal(b []byte) error { func (m *Query) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -368,7 +539,7 @@ func (m *QueryResult) Reset() { *m = QueryResult{} }
func (m *QueryResult) String() string { return proto.CompactTextString(m) } func (m *QueryResult) String() string { return proto.CompactTextString(m) }
func (*QueryResult) ProtoMessage() {} func (*QueryResult) ProtoMessage() {}
func (*QueryResult) Descriptor() ([]byte, []int) { func (*QueryResult) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{5} return fileDescriptor_eefc82927d57d89b, []int{8}
} }
func (m *QueryResult) XXX_Unmarshal(b []byte) error { func (m *QueryResult) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -421,7 +592,7 @@ func (m *ChunkedReadResponse) Reset() { *m = ChunkedReadResponse{} }
func (m *ChunkedReadResponse) String() string { return proto.CompactTextString(m) } func (m *ChunkedReadResponse) String() string { return proto.CompactTextString(m) }
func (*ChunkedReadResponse) ProtoMessage() {} func (*ChunkedReadResponse) ProtoMessage() {}
func (*ChunkedReadResponse) Descriptor() ([]byte, []int) { func (*ChunkedReadResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_eefc82927d57d89b, []int{6} return fileDescriptor_eefc82927d57d89b, []int{9}
} }
func (m *ChunkedReadResponse) XXX_Unmarshal(b []byte) error { func (m *ChunkedReadResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -467,7 +638,10 @@ func (m *ChunkedReadResponse) GetQueryIndex() int64 {
func init() { func init() {
proto.RegisterEnum("prometheus.ReadRequest_ResponseType", ReadRequest_ResponseType_name, ReadRequest_ResponseType_value) proto.RegisterEnum("prometheus.ReadRequest_ResponseType", ReadRequest_ResponseType_name, ReadRequest_ResponseType_value)
proto.RegisterType((*WriteRequest)(nil), "prometheus.WriteRequest") proto.RegisterType((*WriteRequest)(nil), "prometheus.WriteRequest")
proto.RegisterType((*MinimizedWriteRequestPacking)(nil), "prometheus.MinimizedWriteRequestPacking")
proto.RegisterType((*MinimizedWriteRequest)(nil), "prometheus.MinimizedWriteRequest") proto.RegisterType((*MinimizedWriteRequest)(nil), "prometheus.MinimizedWriteRequest")
proto.RegisterType((*MinimizedWriteRequestFixed32)(nil), "prometheus.MinimizedWriteRequestFixed32")
proto.RegisterType((*MinimizedWriteRequestFixed64)(nil), "prometheus.MinimizedWriteRequestFixed64")
proto.RegisterType((*ReadRequest)(nil), "prometheus.ReadRequest") proto.RegisterType((*ReadRequest)(nil), "prometheus.ReadRequest")
proto.RegisterType((*ReadResponse)(nil), "prometheus.ReadResponse") proto.RegisterType((*ReadResponse)(nil), "prometheus.ReadResponse")
proto.RegisterType((*Query)(nil), "prometheus.Query") proto.RegisterType((*Query)(nil), "prometheus.Query")
@ -478,41 +652,45 @@ func init() {
func init() { proto.RegisterFile("remote.proto", fileDescriptor_eefc82927d57d89b) } func init() { proto.RegisterFile("remote.proto", fileDescriptor_eefc82927d57d89b) }
var fileDescriptor_eefc82927d57d89b = []byte{ var fileDescriptor_eefc82927d57d89b = []byte{
// 543 bytes of a gzipped FileDescriptorProto // 601 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x40, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x94, 0xdf, 0x4e, 0x13, 0x41,
0x10, 0xae, 0xeb, 0xb4, 0x09, 0xe3, 0x10, 0x99, 0x6d, 0x43, 0x4c, 0x0e, 0x49, 0x64, 0x71, 0x88, 0x14, 0xc6, 0x19, 0x5a, 0x68, 0x3d, 0x8b, 0x64, 0x1d, 0x40, 0x56, 0x62, 0x80, 0x6c, 0x8c, 0x69,
0x54, 0x14, 0x44, 0xa8, 0x38, 0xf5, 0x40, 0x5a, 0x22, 0x95, 0x52, 0xf3, 0xb3, 0x09, 0x02, 0x21, 0x82, 0xa9, 0xb1, 0x34, 0x5c, 0x71, 0x21, 0x60, 0x0d, 0x22, 0xab, 0x38, 0xad, 0xd1, 0x18, 0x93,
0x24, 0xcb, 0xb1, 0x47, 0x8d, 0x45, 0xfc, 0x53, 0xef, 0x5a, 0x6a, 0x38, 0xf3, 0x00, 0x3c, 0x13, 0xcd, 0x76, 0xf7, 0x84, 0x4e, 0x60, 0xff, 0xb0, 0x33, 0x9b, 0x50, 0xaf, 0x7d, 0x00, 0xe3, 0x23,
0xa7, 0x9e, 0x10, 0x4f, 0x80, 0x50, 0x9e, 0x04, 0x79, 0x6d, 0x87, 0x0d, 0x20, 0xc4, 0xcd, 0xfb, 0x79, 0xc5, 0x95, 0xf1, 0x09, 0x8c, 0xe1, 0x49, 0xcc, 0xfe, 0x83, 0xa9, 0xd6, 0x68, 0x9a, 0x78,
0xfd, 0xcd, 0xec, 0xec, 0x18, 0xea, 0x09, 0x06, 0x11, 0xc7, 0x41, 0x9c, 0x44, 0x3c, 0x22, 0x10, 0xb7, 0x7b, 0xce, 0xf7, 0x7d, 0xfb, 0x9b, 0x33, 0x3b, 0x03, 0x73, 0x31, 0xfa, 0xa1, 0xc4, 0x66,
0x27, 0x51, 0x80, 0x7c, 0x8e, 0x29, 0x6b, 0x6b, 0x7c, 0x19, 0x23, 0xcb, 0x89, 0xf6, 0xfe, 0x45, 0x14, 0x87, 0x32, 0xa4, 0x10, 0xc5, 0xa1, 0x8f, 0x72, 0x80, 0x89, 0x58, 0xd1, 0xe4, 0x30, 0x42,
0x74, 0x11, 0x89, 0xcf, 0xfb, 0xd9, 0x57, 0x8e, 0x9a, 0x9f, 0x15, 0xa8, 0xbf, 0x49, 0x7c, 0x8e, 0x91, 0x37, 0x56, 0x16, 0x8f, 0xc3, 0xe3, 0x30, 0x7b, 0x7c, 0x98, 0x3e, 0xe5, 0x55, 0xf3, 0x13,
0x14, 0x2f, 0x53, 0x64, 0x9c, 0x1c, 0x01, 0x70, 0x3f, 0x40, 0x86, 0x89, 0x8f, 0xcc, 0x50, 0x7a, 0x81, 0xb9, 0x37, 0x31, 0x97, 0xc8, 0xf0, 0x2c, 0x41, 0x21, 0xe9, 0x36, 0x80, 0xe4, 0x3e, 0x0a,
0x6a, 0x5f, 0x1b, 0xde, 0x1e, 0xfc, 0x0a, 0x1d, 0x4c, 0xfd, 0x00, 0x27, 0x82, 0x3d, 0xae, 0x5c, 0x8c, 0x39, 0x0a, 0x83, 0xac, 0x57, 0x1a, 0x5a, 0xeb, 0x76, 0xf3, 0x3a, 0xb4, 0xd9, 0xe3, 0x3e,
0x7f, 0xef, 0x6e, 0x51, 0x49, 0x4f, 0x8e, 0xa0, 0x16, 0x20, 0x77, 0x3c, 0x87, 0x3b, 0x86, 0x2a, 0x76, 0xb3, 0xee, 0x6e, 0xf5, 0xe2, 0xfb, 0xda, 0x14, 0x53, 0xf4, 0x74, 0x1b, 0xea, 0x3e, 0x4a,
0xbc, 0x6d, 0xd9, 0x6b, 0x21, 0x4f, 0x7c, 0xd7, 0x2a, 0x14, 0x85, 0x7f, 0xed, 0x38, 0xab, 0xd4, 0xc7, 0x73, 0xa4, 0x63, 0x54, 0x32, 0xef, 0x8a, 0xea, 0xb5, 0x50, 0xc6, 0xdc, 0xb5, 0x0a, 0x45,
0xb6, 0x75, 0xd5, 0xfc, 0xa4, 0x40, 0xd3, 0xf2, 0x43, 0x3f, 0xf0, 0x3f, 0xa2, 0xb7, 0xd1, 0xdb, 0xe1, 0xbf, 0x72, 0x1c, 0x54, 0xeb, 0xd3, 0x7a, 0xc5, 0xfc, 0x4c, 0xe0, 0xae, 0xc5, 0x03, 0xee,
0xf8, 0x2f, 0xbd, 0x75, 0x37, 0xf2, 0x4b, 0xdb, 0x3f, 0x9b, 0x34, 0xa0, 0xca, 0x96, 0xc1, 0x2c, 0xf3, 0x0f, 0xe8, 0xa9, 0x6c, 0x47, 0x8e, 0x7b, 0xc2, 0x83, 0x63, 0x7a, 0x38, 0x06, 0xf1, 0xfe,
0x5a, 0x30, 0xa3, 0xd2, 0x53, 0xfa, 0x37, 0x68, 0x79, 0xcc, 0x1b, 0x38, 0xab, 0xd4, 0x54, 0xbd, 0xc8, 0x67, 0x4a, 0xf7, 0x35, 0x6b, 0xe1, 0x1d, 0x83, 0x6c, 0x40, 0x4d, 0x0c, 0xfd, 0x7e, 0x78,
0x62, 0x7e, 0x55, 0x40, 0xa3, 0xe8, 0x78, 0x65, 0xf1, 0x03, 0xa8, 0x5e, 0xa6, 0x72, 0xe5, 0x5b, 0x2a, 0x8c, 0xea, 0x3a, 0x69, 0xdc, 0x60, 0xe5, 0x6b, 0x8e, 0x73, 0x50, 0xad, 0x57, 0xf4, 0xaa,
0x72, 0xe5, 0x57, 0x29, 0x26, 0x4b, 0x5a, 0x2a, 0xc8, 0x7b, 0x68, 0x39, 0xae, 0x8b, 0x31, 0x47, 0xf9, 0x91, 0xc0, 0xd2, 0x58, 0x28, 0xda, 0x19, 0x43, 0xb3, 0xf6, 0x17, 0x9a, 0x89, 0x31, 0xfe,
0xcf, 0x4e, 0x90, 0xc5, 0x51, 0xc8, 0xd0, 0x16, 0xaf, 0x61, 0x6c, 0xf7, 0xd4, 0x7e, 0x63, 0x78, 0x38, 0x9b, 0xa7, 0xfc, 0x1c, 0xbd, 0xcd, 0xd6, 0x44, 0xb3, 0x29, 0xbc, 0xff, 0x09, 0x6a, 0xab,
0x57, 0x36, 0x4b, 0x65, 0x06, 0xb4, 0x50, 0x4f, 0x97, 0x31, 0xd2, 0x66, 0x19, 0x22, 0xa3, 0xcc, 0x3d, 0x39, 0xd4, 0x56, 0x7b, 0x62, 0xa8, 0xaf, 0x04, 0x34, 0x86, 0x8e, 0x57, 0x6e, 0xd3, 0x06,
0x3c, 0x84, 0xba, 0x0c, 0x10, 0x0d, 0xaa, 0x93, 0x91, 0xf5, 0xf2, 0x7c, 0x3c, 0xd1, 0xb7, 0x48, 0xd4, 0xce, 0x12, 0x15, 0xe0, 0x96, 0x0a, 0xf0, 0x2a, 0xc1, 0x78, 0xc8, 0x4a, 0x05, 0x7d, 0x0f,
0x0b, 0xf6, 0x26, 0x53, 0x3a, 0x1e, 0x59, 0xe3, 0x27, 0xf6, 0xdb, 0x17, 0xd4, 0x3e, 0x39, 0x7d, 0xcb, 0x8e, 0xeb, 0x62, 0x24, 0xd1, 0xb3, 0x63, 0x14, 0x51, 0x18, 0x08, 0xb4, 0xb3, 0xc3, 0x64,
0xfd, 0xfc, 0xd9, 0x44, 0x57, 0xcc, 0x51, 0xe6, 0x72, 0xd6, 0x51, 0xe4, 0x01, 0x54, 0x13, 0x64, 0x4c, 0xaf, 0x57, 0x1a, 0xf3, 0xad, 0x7b, 0xaa, 0x59, 0xf9, 0x4c, 0x93, 0x15, 0xea, 0xde, 0x30,
0xe9, 0x82, 0x97, 0x17, 0x6a, 0xfd, 0x79, 0x21, 0xc1, 0xd3, 0x52, 0x67, 0x7e, 0x51, 0x60, 0x47, 0x42, 0xb6, 0x54, 0x86, 0xa8, 0x55, 0x61, 0xb6, 0x61, 0x4e, 0x2d, 0x50, 0x0d, 0x6a, 0xdd, 0x1d,
0x10, 0xe4, 0x1e, 0x10, 0xc6, 0x9d, 0x84, 0xdb, 0x62, 0xae, 0xdc, 0x09, 0x62, 0x3b, 0xc8, 0x72, 0xeb, 0xe8, 0xb0, 0xd3, 0xd5, 0xa7, 0xe8, 0x32, 0x2c, 0x74, 0x7b, 0xac, 0xb3, 0x63, 0x75, 0x9e,
0x94, 0xbe, 0x4a, 0x75, 0xc1, 0x4c, 0x4b, 0xc2, 0x62, 0xa4, 0x0f, 0x3a, 0x86, 0xde, 0xa6, 0x76, 0xd8, 0x6f, 0x5f, 0x32, 0x7b, 0x6f, 0xff, 0xf5, 0x8b, 0xe7, 0x5d, 0x9d, 0x98, 0x3b, 0xa9, 0xcb,
0x5b, 0x68, 0x1b, 0x18, 0x7a, 0xb2, 0xf2, 0x10, 0x6a, 0x81, 0xc3, 0xdd, 0x39, 0x26, 0xac, 0x58, 0xb9, 0x8a, 0xa2, 0x8f, 0xa0, 0x16, 0xa3, 0x48, 0x4e, 0x65, 0xb9, 0xa0, 0xe5, 0xdf, 0x17, 0x94,
0x20, 0x43, 0xee, 0xea, 0xdc, 0x99, 0xe1, 0xc2, 0xca, 0x05, 0x74, 0xad, 0x24, 0x07, 0xb0, 0x33, 0xf5, 0x59, 0xa9, 0x33, 0xbf, 0x10, 0x98, 0xc9, 0x1a, 0xf4, 0x01, 0x50, 0x21, 0x9d, 0x58, 0xda,
0xf7, 0x43, 0x9e, 0xbf, 0xa7, 0x36, 0x6c, 0xfe, 0x3e, 0xdc, 0xd3, 0x8c, 0xa4, 0xb9, 0xc6, 0x1c, 0xd9, 0x5c, 0xa5, 0xe3, 0x47, 0xb6, 0x9f, 0xe6, 0x90, 0x46, 0x85, 0xe9, 0x59, 0xa7, 0x57, 0x36,
0x83, 0x26, 0x5d, 0x8e, 0x3c, 0xfa, 0xff, 0x85, 0x97, 0xb7, 0xc8, 0xbc, 0x82, 0xbd, 0x93, 0x79, 0x2c, 0x41, 0x1b, 0xa0, 0x63, 0xe0, 0x8d, 0x6a, 0xa7, 0x33, 0xed, 0x3c, 0x06, 0x9e, 0xaa, 0x6c,
0x1a, 0x7e, 0xc8, 0x1e, 0x47, 0x9a, 0xea, 0x63, 0x68, 0xb8, 0x39, 0x6c, 0x6f, 0x44, 0xde, 0x91, 0x43, 0xdd, 0x77, 0xa4, 0x3b, 0xc0, 0x58, 0x14, 0xe7, 0xdf, 0x50, 0xa9, 0x0e, 0x9d, 0x3e, 0x9e,
0x23, 0x0b, 0x63, 0x91, 0x7a, 0xd3, 0x95, 0x8f, 0xa4, 0x0b, 0x5a, 0xb6, 0x46, 0x4b, 0xdb, 0x0f, 0x5a, 0xb9, 0x80, 0x5d, 0x29, 0xe9, 0x06, 0xcc, 0x0c, 0x78, 0x20, 0xf3, 0xfd, 0xd4, 0x5a, 0x4b,
0x3d, 0xbc, 0x2a, 0xe6, 0x04, 0x02, 0x7a, 0x9a, 0x21, 0xc7, 0xfb, 0xd7, 0xab, 0x8e, 0xf2, 0x6d, 0xbf, 0x0e, 0x77, 0x3f, 0x6d, 0xb2, 0x5c, 0x63, 0x76, 0x40, 0x53, 0x16, 0x47, 0xb7, 0xfe, 0xfd,
0xd5, 0x51, 0x7e, 0xac, 0x3a, 0xca, 0xbb, 0xdd, 0x2c, 0x37, 0x9e, 0xcd, 0x76, 0xc5, 0x0f, 0xfd, 0xbe, 0x52, 0xff, 0x22, 0xf3, 0x1c, 0x16, 0xf6, 0x06, 0x49, 0x70, 0x92, 0x6e, 0x8e, 0x32, 0xd5,
0xf0, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x3e, 0xdc, 0x81, 0x0f, 0x04, 0x00, 0x00, 0xc7, 0x30, 0xef, 0xe6, 0x65, 0x7b, 0x24, 0xf2, 0x8e, 0x1a, 0x59, 0x18, 0x8b, 0xd4, 0x9b, 0xae,
0xfa, 0x4a, 0xd7, 0x40, 0x4b, 0x7f, 0xa3, 0xa1, 0xcd, 0x03, 0x0f, 0xcf, 0x8b, 0x39, 0x41, 0x56,
0x7a, 0x96, 0x56, 0x76, 0x17, 0x2f, 0x2e, 0x57, 0xc9, 0xb7, 0xcb, 0x55, 0xf2, 0xe3, 0x72, 0x95,
0xbc, 0x9b, 0x4d, 0x73, 0xa3, 0x7e, 0x7f, 0x36, 0xbb, 0x8f, 0x37, 0x7f, 0x06, 0x00, 0x00, 0xff,
0xff, 0x13, 0xe9, 0xfd, 0x50, 0xce, 0x05, 0x00, 0x00,
} }
func (m *WriteRequest) Marshal() (dAtA []byte, err error) { func (m *WriteRequest) Marshal() (dAtA []byte, err error) {
@ -570,6 +748,54 @@ func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil return len(dAtA) - i, nil
} }
// Marshal serializes the message into a newly allocated buffer pre-sized by
// Size and returns the encoded bytes. Generated gogo/protobuf code.
func (m *MinimizedWriteRequestPacking) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo serializes the message into dAtA (which must hold at least Size()
// bytes) and returns the number of bytes written.
func (m *MinimizedWriteRequestPacking) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes the message into the tail of dAtA, filling it
// backwards from the end (fields emitted in reverse field-number order, as
// gogo/protobuf generates), and returns the number of bytes written.
func (m *MinimizedWriteRequestPacking) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		// Unknown fields are preserved verbatim at the end of the payload.
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if len(m.Symbols) > 0 {
		i -= len(m.Symbols)
		copy(dAtA[i:], m.Symbols)
		i = encodeVarintRemote(dAtA, i, uint64(len(m.Symbols)))
		i--
		dAtA[i] = 0x22 // tag: field 4, wire type 2 (length-delimited)
	}
	if len(m.Timeseries) > 0 {
		// Reverse iteration keeps elements in forward order in the
		// backwards-filled buffer.
		for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintRemote(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0xa // tag: field 1, wire type 2 (length-delimited)
		}
	}
	return len(dAtA) - i, nil
}
func (m *MinimizedWriteRequest) Marshal() (dAtA []byte, err error) { func (m *MinimizedWriteRequest) Marshal() (dAtA []byte, err error) {
size := m.Size() size := m.Size()
dAtA = make([]byte, size) dAtA = make([]byte, size)
@ -618,6 +844,102 @@ func (m *MinimizedWriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil return len(dAtA) - i, nil
} }
// Marshal serializes the message into a newly allocated buffer pre-sized by
// Size and returns the encoded bytes. Generated gogo/protobuf code.
func (m *MinimizedWriteRequestFixed32) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo serializes the message into dAtA (which must hold at least Size()
// bytes) and returns the number of bytes written.
func (m *MinimizedWriteRequestFixed32) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes the message into the tail of dAtA, filling it
// backwards from the end (fields emitted in reverse field-number order, as
// gogo/protobuf generates), and returns the number of bytes written.
func (m *MinimizedWriteRequestFixed32) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		// Unknown fields are preserved verbatim at the end of the payload.
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if len(m.Symbols) > 0 {
		i -= len(m.Symbols)
		copy(dAtA[i:], m.Symbols)
		i = encodeVarintRemote(dAtA, i, uint64(len(m.Symbols)))
		i--
		dAtA[i] = 0x22 // tag: field 4, wire type 2 (length-delimited)
	}
	if len(m.Timeseries) > 0 {
		// Reverse iteration keeps elements in forward order in the
		// backwards-filled buffer.
		for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintRemote(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0xa // tag: field 1, wire type 2 (length-delimited)
		}
	}
	return len(dAtA) - i, nil
}
// Marshal allocates a buffer of m.Size() bytes and serializes m into it.
func (m *MinimizedWriteRequestFixed64) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo serializes m into dAtA, which must hold at least m.Size() bytes.
func (m *MinimizedWriteRequestFixed64) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer serializes m into the tail of dAtA in reverse
// field-number order (protoc-gen-gogo style) and returns the byte count.
func (m *MinimizedWriteRequestFixed64) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		// Unknown fields captured during Unmarshal are emitted verbatim.
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if len(m.Symbols) > 0 {
		i -= len(m.Symbols)
		copy(dAtA[i:], m.Symbols)
		i = encodeVarintRemote(dAtA, i, uint64(len(m.Symbols)))
		i--
		dAtA[i] = 0x22 // tag: field 4, wire type 2 (length-delimited)
	}
	if len(m.Timeseries) > 0 {
		for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintRemote(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0xa // tag: field 1, wire type 2 (length-delimited)
		}
	}
	return len(dAtA) - i, nil
}
func (m *ReadRequest) Marshal() (dAtA []byte, err error) { func (m *ReadRequest) Marshal() (dAtA []byte, err error) {
size := m.Size() size := m.Size()
dAtA = make([]byte, size) dAtA = make([]byte, size)
@ -903,6 +1225,28 @@ func (m *WriteRequest) Size() (n int) {
return n return n
} }
// Size returns the encoded protobuf size of m in bytes.
func (m *MinimizedWriteRequestPacking) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Timeseries) > 0 {
		for _, e := range m.Timeseries {
			l = e.Size()
			n += 1 + l + sovRemote(uint64(l)) // tag byte + payload + length varint
		}
	}
	l = len(m.Symbols)
	if l > 0 {
		n += 1 + l + sovRemote(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
func (m *MinimizedWriteRequest) Size() (n int) { func (m *MinimizedWriteRequest) Size() (n int) {
if m == nil { if m == nil {
return 0 return 0
@ -925,6 +1269,50 @@ func (m *MinimizedWriteRequest) Size() (n int) {
return n return n
} }
// Size returns the encoded protobuf size of m in bytes.
func (m *MinimizedWriteRequestFixed32) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Timeseries) > 0 {
		for _, e := range m.Timeseries {
			l = e.Size()
			n += 1 + l + sovRemote(uint64(l)) // tag byte + payload + length varint
		}
	}
	l = len(m.Symbols)
	if l > 0 {
		n += 1 + l + sovRemote(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the encoded protobuf size of m in bytes.
func (m *MinimizedWriteRequestFixed64) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Timeseries) > 0 {
		for _, e := range m.Timeseries {
			l = e.Size()
			n += 1 + l + sovRemote(uint64(l)) // tag byte + payload + length varint
		}
	}
	l = len(m.Symbols)
	if l > 0 {
		n += 1 + l + sovRemote(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
func (m *ReadRequest) Size() (n int) { func (m *ReadRequest) Size() (n int) {
if m == nil { if m == nil {
return 0 return 0
@ -1160,6 +1548,123 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
} }
return nil return nil
} }
// Unmarshal decodes the protobuf wire format in dAtA into m
// (protoc-gen-gogo style hand-rolled decoder).
func (m *MinimizedWriteRequestPacking) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field tag varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowRemote
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: MinimizedWriteRequestPacking: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: MinimizedWriteRequestPacking: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: repeated Timeseries (length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowRemote
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthRemote
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthRemote
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Timeseries = append(m.Timeseries, MinimizedTimeSeriesPacking{})
			if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: Symbols (length-delimited string).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Symbols", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowRemote
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthRemote
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthRemote
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Symbols = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain its raw bytes for re-marshalling.
			iNdEx = preIndex
			skippy, err := skipRemote(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthRemote
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func (m *MinimizedWriteRequest) Unmarshal(dAtA []byte) error { func (m *MinimizedWriteRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA) l := len(dAtA)
iNdEx := 0 iNdEx := 0
@ -1277,6 +1782,240 @@ func (m *MinimizedWriteRequest) Unmarshal(dAtA []byte) error {
} }
return nil return nil
} }
// Unmarshal decodes the protobuf wire format in dAtA into m
// (protoc-gen-gogo style hand-rolled decoder).
func (m *MinimizedWriteRequestFixed32) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field tag varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowRemote
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: MinimizedWriteRequestFixed32: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: MinimizedWriteRequestFixed32: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: repeated Timeseries (length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowRemote
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthRemote
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthRemote
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Timeseries = append(m.Timeseries, MinimizedTimeSeriesFixed32{})
			if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: Symbols (length-delimited string).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Symbols", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowRemote
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthRemote
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthRemote
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Symbols = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain its raw bytes for re-marshalling.
			iNdEx = preIndex
			skippy, err := skipRemote(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthRemote
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m
// (protoc-gen-gogo style hand-rolled decoder).
func (m *MinimizedWriteRequestFixed64) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field tag varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowRemote
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: MinimizedWriteRequestFixed64: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: MinimizedWriteRequestFixed64: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: repeated Timeseries (length-delimited message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowRemote
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthRemote
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthRemote
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Timeseries = append(m.Timeseries, MinimizedTimeSeriesFixed64{})
			if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: Symbols (length-delimited string).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Symbols", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowRemote
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthRemote
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthRemote
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Symbols = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and retain its raw bytes for re-marshalling.
			iNdEx = preIndex
			skippy, err := skipRemote(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthRemote
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func (m *ReadRequest) Unmarshal(dAtA []byte) error { func (m *ReadRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA) l := len(dAtA)
iNdEx := 0 iNdEx := 0

View file

@ -27,6 +27,17 @@ message WriteRequest {
repeated prometheus.MetricMetadata metadata = 3 [(gogoproto.nullable) = false]; repeated prometheus.MetricMetadata metadata = 3 [(gogoproto.nullable) = false];
} }
// MinimizedWriteRequestPacking is an experimental remote write request whose
// series reference their label strings via packed offset/length references
// into the shared symbols table below.
message MinimizedWriteRequestPacking {
  repeated MinimizedTimeSeriesPacking timeseries = 1 [(gogoproto.nullable) = false];
  // Cortex uses this field to determine the source of the write request.
  // We reserve it to avoid any compatibility issues.
  reserved 2;
  // Metadata (3) has moved to be part of the TimeSeries type
  reserved 3;
  // The symbols table. All symbols are concatenated strings. To read the symbols table, it's required
  // to know the offset:length range of the actual symbol to read from this string.
  string symbols = 4;
}
message MinimizedWriteRequest { message MinimizedWriteRequest {
repeated MinimizedTimeSeries timeseries = 1 [(gogoproto.nullable) = false]; repeated MinimizedTimeSeries timeseries = 1 [(gogoproto.nullable) = false];
@ -40,6 +51,30 @@ message MinimizedWriteRequest {
string symbols = 4; string symbols = 4;
} }
// MinimizedWriteRequestFixed32 is an experimental remote write request whose
// series reference their label strings via pairs of fixed32 (offset, length)
// values into the shared symbols table below.
message MinimizedWriteRequestFixed32 {
  repeated MinimizedTimeSeriesFixed32 timeseries = 1 [(gogoproto.nullable) = false];
  // Cortex uses this field to determine the source of the write request.
  // We reserve it to avoid any compatibility issues.
  reserved 2;
  // Metadata (3) has moved to be part of the TimeSeries type
  reserved 3;
  // The symbols table. All symbols are concatenated strings. To read the symbols table, it's required
  // to know the offset:length range of the actual symbol to read from this string.
  string symbols = 4;
}
// MinimizedWriteRequestFixed64 is an experimental remote write request whose
// series reference their label strings via single fixed64 values packing
// (offset << 32 | length) into the shared symbols table below.
message MinimizedWriteRequestFixed64 {
  repeated MinimizedTimeSeriesFixed64 timeseries = 1 [(gogoproto.nullable) = false];
  // Cortex uses this field to determine the source of the write request.
  // We reserve it to avoid any compatibility issues.
  reserved 2;
  // Metadata (3) has moved to be part of the TimeSeries type
  reserved 3;
  // The symbols table. All symbols are concatenated strings. To read the symbols table, it's required
  // to know the offset:length range of the actual symbol to read from this string.
  string symbols = 4;
}
// ReadRequest represents a remote read request. // ReadRequest represents a remote read request.
message ReadRequest { message ReadRequest {
repeated Query queries = 1; repeated Query queries = 1;

File diff suppressed because it is too large Load diff

View file

@ -144,6 +144,43 @@ message MinimizedTimeSeries {
// TODO: add metadata // TODO: add metadata
} }
// MinimizedTimeSeriesFixed32 encodes label references as plain fixed32 values
// (no varint encoding). Based on an experiment by Marco.
message MinimizedTimeSeriesFixed32 {
  // Sorted list of label name-value pair references. This list's len is always multiple of 4,
  // packing tuples of (label name offset, label name length, label value offset, label value length).
  repeated fixed32 label_symbols = 1 [(gogoproto.nullable) = false];
  // Sorted by time, oldest sample first.
  // TODO: support references for other types
  repeated Sample samples = 2 [(gogoproto.nullable) = false];
  repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false];
  repeated Histogram histograms = 4 [(gogoproto.nullable) = false];
}
// MinimizedTimeSeriesFixed64 encodes each label reference as one fixed64 value.
// Based on an experiment by Marco.
message MinimizedTimeSeriesFixed64 {
  // Sorted list of label name-value pair references. Each entry is produced by
  // packRef64: the symbol's offset in the high 32 bits, its length in the low 32 bits.
  repeated fixed64 label_symbols = 1 [(gogoproto.nullable) = false];
  // Sorted by time, oldest sample first.
  // TODO: support references for other types
  repeated Sample samples = 2 [(gogoproto.nullable) = false];
  repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false];
  repeated Histogram histograms = 4 [(gogoproto.nullable) = false];
}
// MinimizedTimeSeriesPacking encodes each label reference as one fixed32 value.
// Based on an experiment by Marco.
message MinimizedTimeSeriesPacking {
  // Sorted list of label name-value pair references. Each entry is produced by
  // packRef: 20 bits of symbol offset and 12 bits of symbol length.
  repeated fixed32 label_symbols = 1 [(gogoproto.nullable) = false];
  // Sorted by time, oldest sample first.
  // TODO: support references for other types
  repeated Sample samples = 2 [(gogoproto.nullable) = false];
  repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false];
  repeated Histogram histograms = 4 [(gogoproto.nullable) = false];
}
message Label { message Label {
string name = 1; string name = 1;
string value = 2; string value = 2;

View file

@ -10,10 +10,10 @@ if ! [[ "$0" =~ "scripts/genproto.sh" ]]; then
exit 255 exit 255
fi fi
if ! [[ $(protoc --version) =~ "3.21.12" ]]; then #if ! [[ $(protoc --version) =~ "3.21.12" ]]; then
echo "could not find protoc 3.21.12, is it installed + in PATH?" # echo "could not find protoc 3.21.12, is it installed + in PATH?"
exit 255 # exit 255
fi #fi
# Since we run go install, go mod download, the go.sum will change. # Since we run go install, go mod download, the go.sum will change.
# Make a backup. # Make a backup.

View file

@ -7,8 +7,10 @@ trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT
declare -a INSTANCES declare -a INSTANCES
# (sender,receiver) pairs to run: (sender_name; sender_flags; receiver_name; receiver_flags) # (sender,receiver) pairs to run: (sender_name; sender_flags; receiver_name; receiver_flags)
INSTANCES+=('sender-v1;;receiver-v1;') INSTANCES+=('sender-v1;;receiver-v1;')
INSTANCES+=('sender-v11;--enable-feature rw-1-1-sender;receiver-v11;--enable-feature rw-1-1-receiver') INSTANCES+=('sender-v11;--remote-write-format 1;receiver-v11;--remote-write-format 1')
INSTANCES+=('sender-v11-min32-optimized-varint;--remote-write-format 2;receiver-v11-min32-optimized-varint;--remote-write-format 2')
INSTANCES+=('sender-v11-min64-fixed;--remote-write-format 3;receiver-v11-min64-fixed;--remote-write-format 3')
INSTANCES+=('sender-v11-min32-fixed;--remote-write-format 4;receiver-v11-min32-fixed;--remote-write-format 4')
# ~~~~~~~~~~~~~ # ~~~~~~~~~~~~~
# append two ports to all instances # append two ports to all instances

View file

@ -1,5 +1,5 @@
global: global:
scrape_interval: 15s scrape_interval: 5s
external_labels: external_labels:
role: ${SENDER_NAME} role: ${SENDER_NAME}
@ -8,6 +8,8 @@ remote_write:
name: ${RECEIVER_NAME} name: ${RECEIVER_NAME}
metadata_config: metadata_config:
send: false send: false
queue_config:
max_samples_per_send: 5000
scrape_configs: scrape_configs:
${SCRAPE_CONFIGS} ${SCRAPE_CONFIGS}

View file

@ -81,11 +81,11 @@ func init() {
// Client allows reading and writing from/to a remote HTTP endpoint. // Client allows reading and writing from/to a remote HTTP endpoint.
type Client struct { type Client struct {
remoteName string // Used to differentiate clients in metrics. remoteName string // Used to differentiate clients in metrics.
urlString string // url.String() urlString string // url.String()
remoteWrite11 bool // For write clients, ignored for read clients. rwFormat RemoteWriteFormat // For write clients, ignored for read clients.
Client *http.Client Client *http.Client
timeout time.Duration timeout time.Duration
retryOnRateLimit bool retryOnRateLimit bool
@ -96,14 +96,14 @@ type Client struct {
// ClientConfig configures a client. // ClientConfig configures a client.
type ClientConfig struct { type ClientConfig struct {
URL *config_util.URL URL *config_util.URL
RemoteWrite11 bool RemoteWriteFormat RemoteWriteFormat
Timeout model.Duration Timeout model.Duration
HTTPClientConfig config_util.HTTPClientConfig HTTPClientConfig config_util.HTTPClientConfig
SigV4Config *sigv4.SigV4Config SigV4Config *sigv4.SigV4Config
AzureADConfig *azuread.AzureADConfig AzureADConfig *azuread.AzureADConfig
Headers map[string]string Headers map[string]string
RetryOnRateLimit bool RetryOnRateLimit bool
} }
// ReadClient uses the SAMPLES method of remote read to read series samples from remote server. // ReadClient uses the SAMPLES method of remote read to read series samples from remote server.
@ -165,7 +165,7 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
httpClient.Transport = otelhttp.NewTransport(t) httpClient.Transport = otelhttp.NewTransport(t)
return &Client{ return &Client{
remoteWrite11: conf.RemoteWrite11, rwFormat: conf.RemoteWriteFormat,
remoteName: name, remoteName: name,
urlString: conf.URL.String(), urlString: conf.URL.String(),
Client: httpClient, Client: httpClient,
@ -211,11 +211,11 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) error {
httpReq.Header.Set("Content-Type", "application/x-protobuf") httpReq.Header.Set("Content-Type", "application/x-protobuf")
httpReq.Header.Set("User-Agent", UserAgent) httpReq.Header.Set("User-Agent", UserAgent)
// Set the right header if we're using v1.1 remote write protocol if c.rwFormat == Base1 {
if c.remoteWrite11 {
httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion11HeaderValue)
} else {
httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion1HeaderValue) httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion1HeaderValue)
} else {
// Set the right header if we're using v1.1 remote write protocol
httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion11HeaderValue)
} }
if attempt > 0 { if attempt > 0 {

View file

@ -794,6 +794,24 @@ func labelsToUint32Slice(lbls labels.Labels, symbolTable *rwSymbolTable, buf []u
return result return result
} }
// labelsToUint64Slice interns every label name and value into symbolTable and
// appends the resulting packed 64-bit references to buf (reused from its start),
// producing one (name ref, value ref) pair per label in order.
func labelsToUint64Slice(lbls labels.Labels, symbolTable *rwSymbolTable, buf []uint64) []uint64 {
	refs := buf[:0]
	lbls.Range(func(l labels.Label) {
		refs = append(refs,
			symbolTable.Ref64Packed(l.Name),
			symbolTable.Ref64Packed(l.Value),
		)
	})
	return refs
}
// labelsToUint32Packed interns every label name and value into symbolTable and
// appends the resulting packed 32-bit references to buf (reused from its start),
// producing one (name ref, value ref) pair per label in order.
func labelsToUint32Packed(lbls labels.Labels, symbolTable *rwSymbolTable, buf []uint32) []uint32 {
	refs := buf[:0]
	lbls.Range(func(l labels.Label) {
		refs = append(refs,
			symbolTable.Ref32Packed(l.Name),
			symbolTable.Ref32Packed(l.Value),
		)
	})
	return refs
}
func Uint32RefToLabels(symbols string, minLabels []uint32) labels.Labels { func Uint32RefToLabels(symbols string, minLabels []uint32) labels.Labels {
ls := labels.NewScratchBuilder(len(minLabels) / 2) ls := labels.NewScratchBuilder(len(minLabels) / 2)
@ -817,6 +835,23 @@ func Uint32RefToLabels(symbols string, minLabels []uint32) labels.Labels {
return ls.Labels() return ls.Labels()
} }
// Uint64RefToLabels decodes packed 64-bit symbol references (offset in the high
// 32 bits, length in the low 32 bits — see unpackRef64) back into labels by
// slicing the corresponding substrings out of the symbols table. References are
// consumed in (name, value) pairs.
func Uint64RefToLabels(symbols string, minLabels []uint64) labels.Labels {
	b := labels.NewScratchBuilder(len(minLabels) / 2)
	for i := 0; i < len(minLabels); i += 2 {
		// todo, check for overflow?
		nameOff, nameLen := unpackRef64(minLabels[i])
		// todo, check for overflow?
		valOff, valLen := unpackRef64(minLabels[i+1])
		b.Add(symbols[nameOff:nameOff+nameLen], symbols[valOff:valOff+valLen])
	}
	return b.Labels()
}
// metricTypeToMetricTypeProto transforms a Prometheus metricType into prompb metricType. Since the former is a string we need to transform it to an enum. // metricTypeToMetricTypeProto transforms a Prometheus metricType into prompb metricType. Since the former is a string we need to transform it to an enum.
func metricTypeToMetricTypeProto(t textparse.MetricType) prompb.MetricMetadata_MetricType { func metricTypeToMetricTypeProto(t textparse.MetricType) prompb.MetricMetadata_MetricType {
mt := strings.ToUpper(string(t)) mt := strings.ToUpper(string(t))
@ -923,6 +958,44 @@ func DecodeMinimizedWriteRequest(r io.Reader) (*prompb.MinimizedWriteRequest, er
return &req, nil return &req, nil
} }
// DecodeMinimizedWriteRequestFixed64 reads a snappy-compressed, protobuf-encoded
// MinimizedWriteRequestFixed64 from r and returns the decoded request.
func DecodeMinimizedWriteRequestFixed64(r io.Reader) (*prompb.MinimizedWriteRequestFixed64, error) {
	compressed, err := io.ReadAll(r)
	if err != nil {
		return nil, err
	}
	raw, err := snappy.Decode(nil, compressed)
	if err != nil {
		return nil, err
	}
	req := &prompb.MinimizedWriteRequestFixed64{}
	if err := proto.Unmarshal(raw, req); err != nil {
		return nil, err
	}
	return req, nil
}
// DecodeMinimizedWriteRequestFixed32 reads a snappy-compressed, protobuf-encoded
// MinimizedWriteRequestFixed32 from r and returns the decoded request.
func DecodeMinimizedWriteRequestFixed32(r io.Reader) (*prompb.MinimizedWriteRequestFixed32, error) {
	compressed, err := io.ReadAll(r)
	if err != nil {
		return nil, err
	}
	raw, err := snappy.Decode(nil, compressed)
	if err != nil {
		return nil, err
	}
	req := &prompb.MinimizedWriteRequestFixed32{}
	if err := proto.Unmarshal(raw, req); err != nil {
		return nil, err
	}
	return req, nil
}
func MinimizedWriteRequestToWriteRequest(redReq *prompb.MinimizedWriteRequest) (*prompb.WriteRequest, error) { func MinimizedWriteRequestToWriteRequest(redReq *prompb.MinimizedWriteRequest) (*prompb.WriteRequest, error) {
req := &prompb.WriteRequest{ req := &prompb.WriteRequest{
Timeseries: make([]prompb.TimeSeries, len(redReq.Timeseries)), Timeseries: make([]prompb.TimeSeries, len(redReq.Timeseries)),
@ -952,6 +1025,44 @@ func MinimizedWriteRequestToWriteRequest(redReq *prompb.MinimizedWriteRequest) (
return req, nil return req, nil
} }
// min64WriteRequestToWriteRequest converts a MinimizedWriteRequestFixed64 back
// into a plain WriteRequest by resolving the packed 64-bit label references
// against the request's symbols table. Exemplar labels are allocated but not
// yet translated (see the TODO below). helper for tests
func min64WriteRequestToWriteRequest(redReq *prompb.MinimizedWriteRequestFixed64) (*prompb.WriteRequest, error) {
	req := &prompb.WriteRequest{
		Timeseries: make([]prompb.TimeSeries, len(redReq.Timeseries)),
		//Metadata: redReq.Metadata,
	}
	for i, rts := range redReq.Timeseries {
		// Resolve packed (offset, length) refs into concrete label strings.
		lbls := Uint64RefToLabels(redReq.Symbols, rts.LabelSymbols)
		ls := make([]prompb.Label, len(lbls))
		for j, l := range lbls {
			ls[j].Name = l.Name
			ls[j].Value = l.Value
		}
		exemplars := make([]prompb.Exemplar, len(rts.Exemplars))
		// TODO handle exemplars
		//for j, e := range rts.Exemplars {
		//	exemplars[j].Value = e.Value
		//	exemplars[j].Timestamp = e.Timestamp
		//	exemplars[j].Labels = make([]prompb.Label, len(e.Labels))
		//
		//	for k, l := range e.Labels {
		//		exemplars[j].Labels[k].Name = redReq.StringSymbolTable[l.NameRef]
		//		exemplars[j].Labels[k].Value = redReq.StringSymbolTable[l.ValueRef]
		//	}
		//}
		req.Timeseries[i].Labels = ls
		req.Timeseries[i].Samples = rts.Samples
		req.Timeseries[i].Exemplars = exemplars
		req.Timeseries[i].Histograms = rts.Histograms
	}
	return req, nil
}
// for use with minimized remote write proto format // for use with minimized remote write proto format
func packRef(offset, length int) uint32 { func packRef(offset, length int) uint32 {
return uint32((offset&0xFFFFF)<<12 | (length & 0xFFF)) return uint32((offset&0xFFFFF)<<12 | (length & 0xFFF))
@ -960,3 +1071,12 @@ func packRef(offset, length int) uint32 {
func unpackRef(ref uint32) (offset, length int) { func unpackRef(ref uint32) (offset, length int) {
return int(ref>>12) & 0xFFFFF, int(ref & 0xFFF) return int(ref>>12) & 0xFFFFF, int(ref & 0xFFF)
} }
// packRef64 packs a symbol-table reference into a single uint64 for the
// minimized remote write proto format: the offset into the symbols string
// occupies the high 32 bits and the symbol length the low 32 bits.
func packRef64(offset, length uint32) uint64 {
	ref := uint64(offset) << 32
	return ref | uint64(length)
}
// unpackRef64 splits a packed 64-bit symbol reference into its components:
// the offset stored in the high 32 bits and the length in the low 32 bits.
func unpackRef64(ref uint64) (offset, length uint32) {
	offset = uint32(ref >> 32)
	length = uint32(ref) // truncation keeps only the low 32 bits
	return offset, length
}

View file

@ -107,6 +107,39 @@ var writeRequestMinimizedFixture = func() *prompb.MinimizedWriteRequest {
} }
}() }()
// writeRequestMinimized64Fixture is a test fixture holding a two-series
// MinimizedWriteRequestFixed64 whose label strings are interned into a fresh
// symbol table; both series share the same packed label references.
var writeRequestMinimized64Fixture = func() *prompb.MinimizedWriteRequestFixed64 {
	st := newRwSymbolTable()
	labelRefs := []uint64{}
	for _, sym := range []string{
		"__name__", "test_metric1",
		"b", "c",
		"baz", "qux",
		"d", "e",
		"foo", "bar",
	} {
		labelRefs = append(labelRefs, st.Ref64Packed(sym))
	}
	series := []prompb.MinimizedTimeSeriesFixed64{
		{
			LabelSymbols: labelRefs,
			Samples:      []prompb.Sample{{Value: 1, Timestamp: 0}},
			Exemplars:    []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 0}},
			Histograms:   []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat())},
		},
		{
			LabelSymbols: labelRefs,
			Samples:      []prompb.Sample{{Value: 2, Timestamp: 1}},
			Exemplars:    []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 1}},
			Histograms:   []prompb.Histogram{HistogramToHistogramProto(2, &testHistogram), FloatHistogramToHistogramProto(3, testHistogram.ToFloat())},
		},
	}
	return &prompb.MinimizedWriteRequestFixed64{
		Timeseries: series,
		Symbols:    st.LabelsString(),
	}
}()
func TestValidateLabelsAndMetricName(t *testing.T) { func TestValidateLabelsAndMetricName(t *testing.T) {
tests := []struct { tests := []struct {
input []prompb.Label input []prompb.Label
@ -568,13 +601,6 @@ func TestDecodeMinWriteRequest(t *testing.T) {
require.Equal(t, writeRequestMinimizedFixture, actual) require.Equal(t, writeRequestMinimizedFixture, actual)
} }
func TestMinimizedWriteRequestToWriteRequest(t *testing.T) {
actual, err := MinimizedWriteRequestToWriteRequest(writeRequestMinimizedFixture)
require.NoError(t, err)
require.Equal(t, writeRequestFixture, actual)
}
func TestNilHistogramProto(t *testing.T) { func TestNilHistogramProto(t *testing.T) {
// This function will panic if it impromperly handles nil // This function will panic if it impromperly handles nil
// values, causing the test to fail. // values, causing the test to fail.
@ -893,3 +919,16 @@ func (c *mockChunkIterator) Next() bool {
func (c *mockChunkIterator) Err() error { func (c *mockChunkIterator) Err() error {
return nil return nil
} }
// TestPackRef64 verifies that a symbol interned via Ref64Packed round-trips
// through unpackRef64 back to the original substring of the symbol table.
func TestPackRef64(t *testing.T) {
	table := newRwSymbolTable()
	for _, sym := range []string{"__name__", "asdfasd"} {
		ref := table.Ref64Packed(sym)
		off, length := unpackRef64(ref)
		require.Equal(t, sym, string(table.symbols[off:off+length]))
	}
}

View file

@ -389,6 +389,15 @@ type WriteClient interface {
Endpoint() string Endpoint() string
} }
// RemoteWriteFormat selects which remote write wire format a sender or
// receiver uses; set from the --remote-write-format flag.
type RemoteWriteFormat int64

const (
	Base1 RemoteWriteFormat = iota // original map based format
	Min32Optimized // two 32bit varint plus marshalling optimization
	Min64Fixed // a single fixed64 bit value, first 32 bits are offset and 2nd 32 bits are length (see packRef64)
	Min32Fixed // two 32bit fixed, similar to optimized but not varints + no manual marshalling optimization
)
// QueueManager manages a queue of samples to be sent to the Storage // QueueManager manages a queue of samples to be sent to the Storage
// indicated by the provided WriteClient. Implements writeTo interface // indicated by the provided WriteClient. Implements writeTo interface
// used by WAL Watcher. // used by WAL Watcher.
@ -406,7 +415,7 @@ type QueueManager struct {
watcher *wlog.Watcher watcher *wlog.Watcher
metadataWatcher *MetadataWatcher metadataWatcher *MetadataWatcher
// experimental feature, new remote write proto format // experimental feature, new remote write proto format
internFormat bool rwFormat RemoteWriteFormat
clientMtx sync.RWMutex clientMtx sync.RWMutex
storeClient WriteClient storeClient WriteClient
@ -454,7 +463,7 @@ func NewQueueManager(
sm ReadyScrapeManager, sm ReadyScrapeManager,
enableExemplarRemoteWrite bool, enableExemplarRemoteWrite bool,
enableNativeHistogramRemoteWrite bool, enableNativeHistogramRemoteWrite bool,
internFormat bool, rwFormat RemoteWriteFormat,
) *QueueManager { ) *QueueManager {
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = log.NewNopLogger()
@ -477,7 +486,7 @@ func NewQueueManager(
storeClient: client, storeClient: client,
sendExemplars: enableExemplarRemoteWrite, sendExemplars: enableExemplarRemoteWrite,
sendNativeHistograms: enableNativeHistogramRemoteWrite, sendNativeHistograms: enableNativeHistogramRemoteWrite,
internFormat: internFormat, rwFormat: rwFormat,
seriesLabels: make(map[chunks.HeadSeriesRef]labels.Labels), seriesLabels: make(map[chunks.HeadSeriesRef]labels.Labels),
seriesSegmentIndexes: make(map[chunks.HeadSeriesRef]int), seriesSegmentIndexes: make(map[chunks.HeadSeriesRef]int),
@ -1276,7 +1285,6 @@ func (q *queue) Chan() <-chan []timeSeries {
func (q *queue) Batch() []timeSeries { func (q *queue) Batch() []timeSeries {
q.batchMtx.Lock() q.batchMtx.Lock()
defer q.batchMtx.Unlock() defer q.batchMtx.Unlock()
select { select {
case batch := <-q.batchQueue: case batch := <-q.batchQueue:
return batch return batch
@ -1363,6 +1371,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
max += int(float64(max) * 0.1) max += int(float64(max) * 0.1)
} }
// TODO we should make an interface for the timeseries type
batchQueue := queue.Chan() batchQueue := queue.Chan()
pendingData := make([]prompb.TimeSeries, max) pendingData := make([]prompb.TimeSeries, max)
for i := range pendingData { for i := range pendingData {
@ -1377,6 +1386,16 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
pendingMinimizedData[i].Samples = []prompb.Sample{{}} pendingMinimizedData[i].Samples = []prompb.Sample{{}}
} }
pendingMin64Data := make([]prompb.MinimizedTimeSeriesFixed64, max)
for i := range pendingMin64Data {
pendingMin64Data[i].Samples = []prompb.Sample{{}}
}
pendingMin32Data := make([]prompb.MinimizedTimeSeriesFixed32, max)
for i := range pendingMin32Data {
pendingMin32Data[i].Samples = []prompb.Sample{{}}
}
timer := time.NewTimer(time.Duration(s.qm.cfg.BatchSendDeadline)) timer := time.NewTimer(time.Duration(s.qm.cfg.BatchSendDeadline))
stop := func() { stop := func() {
if !timer.Stop() { if !timer.Stop() {
@ -1411,17 +1430,28 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
if !ok { if !ok {
return return
} }
if s.qm.internFormat { switch s.qm.rwFormat {
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeries(&symbolTable, batch, pendingMinimizedData, s.qm.sendExemplars, s.qm.sendNativeHistograms) case Base1:
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendMinSamples(ctx, pendingMinimizedData[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, &pBufRaw, &buf)
symbolTable.clear()
} else {
nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms) nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf) s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
case Min32Optimized:
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeries(&symbolTable, batch, pendingMinimizedData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendMinSamples(ctx, pendingMinimizedData[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, &pBufRaw, &buf)
symbolTable.clear()
case Min64Fixed:
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeriesFixed64(&symbolTable, batch, pendingMin64Data, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendMin64Samples(ctx, pendingMin64Data[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
symbolTable.clear()
case Min32Fixed:
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeriesFixed32(&symbolTable, batch, pendingMin32Data, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendMin32Samples(ctx, pendingMin32Data[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
symbolTable.clear()
} }
queue.ReturnForReuse(batch) queue.ReturnForReuse(batch)
stop() stop()
@ -1430,18 +1460,27 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
case <-timer.C: case <-timer.C:
batch := queue.Batch() batch := queue.Batch()
if len(batch) > 0 { if len(batch) > 0 {
if s.qm.internFormat { switch s.qm.rwFormat {
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeries(&symbolTable, batch, pendingMinimizedData, s.qm.sendExemplars, s.qm.sendNativeHistograms) case Base1:
n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendMinSamples(ctx, pendingMinimizedData[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, &pBufRaw, &buf)
symbolTable.clear()
} else {
nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms) nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms n := nPendingSamples + nPendingExemplars + nPendingHistograms
s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf) s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples, level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
"exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms) "exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
case Min32Optimized:
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeries(&symbolTable, batch, pendingMinimizedData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
"exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
s.sendMinSamples(ctx, pendingMinimizedData[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, &pBufRaw, &buf)
symbolTable.clear()
case Min64Fixed:
nPendingSamples, nPendingExemplars, nPendingHistograms := populateMinimizedTimeSeriesFixed64(&symbolTable, batch, pendingMin64Data, s.qm.sendExemplars, s.qm.sendNativeHistograms)
n := nPendingSamples + nPendingExemplars + nPendingHistograms
level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
"exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
s.sendMin64Samples(ctx, pendingMin64Data[:n], symbolTable.LabelsString(), nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
symbolTable.clear()
} }
} }
queue.ReturnForReuse(batch) queue.ReturnForReuse(batch)
@ -1514,6 +1553,30 @@ func (s *shards) sendMinSamples(ctx context.Context, samples []prompb.MinimizedT
s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, time.Since(begin)) s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, time.Since(begin))
} }
// sendMin64Samples marshals the minimized (fixed64 symbol refs) series plus
// their interned symbol table into a MinimizedWriteRequestFixed64, compresses
// it, and ships it with retry/backoff, recording send metrics either way.
func (s *shards) sendMin64Samples(ctx context.Context, samples []prompb.MinimizedTimeSeriesFixed64, labels string, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) {
	start := time.Now()
	// Building the request only fails if proto marshaling fails, which is
	// non-recoverable, so there is no retry around the build itself.
	req, highest, err := buildMinimizedWriteRequestFixed64(samples, labels, pBuf, buf)
	if err == nil {
		err = s.sendSamplesWithBackoff(ctx, req, sampleCount, exemplarCount, histogramCount, highest)
	}
	s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, time.Since(start))
}
// sendMin32Samples marshals the minimized (fixed32 symbol refs) series plus
// their interned symbol table into a MinimizedWriteRequestFixed32, compresses
// it, and ships it with retry/backoff, recording send metrics either way.
func (s *shards) sendMin32Samples(ctx context.Context, samples []prompb.MinimizedTimeSeriesFixed32, labels string, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) {
	start := time.Now()
	// Building the request only fails if proto marshaling fails, which is
	// non-recoverable, so there is no retry around the build itself.
	req, highest, err := buildMinimizedWriteRequestFixed32(samples, labels, pBuf, buf)
	if err == nil {
		err = s.sendSamplesWithBackoff(ctx, req, sampleCount, exemplarCount, histogramCount, highest)
	}
	s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, time.Since(start))
}
func (s *shards) updateMetrics(ctx context.Context, err error, sampleCount, exemplarCount, histogramCount int, duration time.Duration) { func (s *shards) updateMetrics(ctx context.Context, err error, sampleCount, exemplarCount, histogramCount int, duration time.Duration) {
if err != nil { if err != nil {
level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "err", err) level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "err", err)
@ -1638,6 +1701,156 @@ func populateMinimizedTimeSeries(symbolTable *rwSymbolTable, batch []timeSeries,
return nPendingSamples, nPendingExemplars, nPendingHistograms return nPendingSamples, nPendingExemplars, nPendingHistograms
} }
// populateMinimizedTimeSeriesFixed64 fills pendingData (reusing its backing
// storage) with the fixed64-symbol-ref form of batch, interning series labels
// into symbolTable. It returns the number of samples, exemplars and
// histograms queued; exemplars are currently never populated here.
func populateMinimizedTimeSeriesFixed64(symbolTable *rwSymbolTable, batch []timeSeries, pendingData []prompb.MinimizedTimeSeriesFixed64, sendExemplars, sendNativeHistograms bool) (int, int, int) {
	var pendingSamples, pendingExemplars, pendingHistograms int
	for i, d := range batch {
		ts := &pendingData[i]
		ts.Samples = ts.Samples[:0]
		if sendExemplars {
			ts.Exemplars = ts.Exemplars[:0]
		}
		if sendNativeHistograms {
			ts.Histograms = ts.Histograms[:0]
		}
		// sendSamples (via sendSamplesWithBackoff) retries endlessly, so once
		// max samples are pending we stop reading from the queue; indexing
		// pendingData by the batch index is therefore safe.
		ts.LabelSymbols = labelsToUint64Slice(d.seriesLabels, symbolTable, ts.LabelSymbols)
		switch d.sType {
		case tSample:
			ts.Samples = append(ts.Samples, prompb.Sample{
				Value:     d.value,
				Timestamp: d.timestamp,
			})
			pendingSamples++
		// TODO: handle tExemplar (and any remaining types) once the
		// minimized exemplar encoding is settled.
		case tHistogram:
			ts.Histograms = append(ts.Histograms, HistogramToHistogramProto(d.timestamp, d.histogram))
			pendingHistograms++
		case tFloatHistogram:
			ts.Histograms = append(ts.Histograms, FloatHistogramToHistogramProto(d.timestamp, d.floatHistogram))
			pendingHistograms++
		}
	}
	return pendingSamples, pendingExemplars, pendingHistograms
}
// populateMinimizedTimeSeriesFixed32 fills pendingData (reusing its backing
// storage) with the fixed32-symbol-ref form of batch, interning series labels
// into symbolTable. It returns the number of samples, exemplars and
// histograms queued; exemplars are currently never populated here.
func populateMinimizedTimeSeriesFixed32(symbolTable *rwSymbolTable, batch []timeSeries, pendingData []prompb.MinimizedTimeSeriesFixed32, sendExemplars, sendNativeHistograms bool) (int, int, int) {
	var pendingSamples, pendingExemplars, pendingHistograms int
	for i, d := range batch {
		ts := &pendingData[i]
		ts.Samples = ts.Samples[:0]
		if sendExemplars {
			ts.Exemplars = ts.Exemplars[:0]
		}
		if sendNativeHistograms {
			ts.Histograms = ts.Histograms[:0]
		}
		// sendSamples (via sendSamplesWithBackoff) retries endlessly, so once
		// max samples are pending we stop reading from the queue; indexing
		// pendingData by the batch index is therefore safe.
		ts.LabelSymbols = labelsToUint32Slice(d.seriesLabels, symbolTable, ts.LabelSymbols)
		switch d.sType {
		case tSample:
			ts.Samples = append(ts.Samples, prompb.Sample{
				Value:     d.value,
				Timestamp: d.timestamp,
			})
			pendingSamples++
		// TODO: handle tExemplar (and any remaining types) once the
		// minimized exemplar encoding is settled.
		case tHistogram:
			ts.Histograms = append(ts.Histograms, HistogramToHistogramProto(d.timestamp, d.histogram))
			pendingHistograms++
		case tFloatHistogram:
			ts.Histograms = append(ts.Histograms, FloatHistogramToHistogramProto(d.timestamp, d.floatHistogram))
			pendingHistograms++
		}
	}
	return pendingSamples, pendingExemplars, pendingHistograms
}
// populateMinimizedTimeSeriesPacking fills pendingData (reusing its backing
// storage) with the packed-uint32-symbol-ref form of batch, interning series
// labels into symbolTable. It returns the number of samples, exemplars and
// histograms queued; exemplars are currently never populated here.
func populateMinimizedTimeSeriesPacking(symbolTable *rwSymbolTable, batch []timeSeries, pendingData []prompb.MinimizedTimeSeriesPacking, sendExemplars, sendNativeHistograms bool) (int, int, int) {
	var pendingSamples, pendingExemplars, pendingHistograms int
	for i, d := range batch {
		ts := &pendingData[i]
		ts.Samples = ts.Samples[:0]
		if sendExemplars {
			ts.Exemplars = ts.Exemplars[:0]
		}
		if sendNativeHistograms {
			ts.Histograms = ts.Histograms[:0]
		}
		// sendSamples (via sendSamplesWithBackoff) retries endlessly, so once
		// max samples are pending we stop reading from the queue; indexing
		// pendingData by the batch index is therefore safe.
		ts.LabelSymbols = labelsToUint32Packed(d.seriesLabels, symbolTable, ts.LabelSymbols)
		switch d.sType {
		case tSample:
			ts.Samples = append(ts.Samples, prompb.Sample{
				Value:     d.value,
				Timestamp: d.timestamp,
			})
			pendingSamples++
		// TODO: handle tExemplar (and any remaining types) once the
		// minimized exemplar encoding is settled.
		case tHistogram:
			ts.Histograms = append(ts.Histograms, HistogramToHistogramProto(d.timestamp, d.histogram))
			pendingHistograms++
		case tFloatHistogram:
			ts.Histograms = append(ts.Histograms, FloatHistogramToHistogramProto(d.timestamp, d.floatHistogram))
			pendingHistograms++
		}
	}
	return pendingSamples, pendingExemplars, pendingHistograms
}
func sendWriteRequestWithBackoff(ctx context.Context, cfg config.QueueConfig, l log.Logger, attempt func(int) error, onRetry func()) error { func sendWriteRequestWithBackoff(ctx context.Context, cfg config.QueueConfig, l log.Logger, attempt func(int) error, onRetry func()) error {
backoff := cfg.MinBackoff backoff := cfg.MinBackoff
sleepDuration := model.Duration(0) sleepDuration := model.Duration(0)
@ -1742,13 +1955,17 @@ type offLenPair struct {
} }
type rwSymbolTable struct { type rwSymbolTable struct {
symbols []byte symbols []byte
symbolsMap map[string]offLenPair symbolsMap map[string]offLenPair
symbolsMap64Packed map[string]uint64
symbolsMap32Packed map[string]uint32
} }
func newRwSymbolTable() rwSymbolTable { func newRwSymbolTable() rwSymbolTable {
return rwSymbolTable{ return rwSymbolTable{
symbolsMap: make(map[string]offLenPair), symbolsMap: make(map[string]offLenPair),
symbolsMap64Packed: make(map[string]uint64),
symbolsMap32Packed: make(map[string]uint32),
} }
} }
@ -1757,11 +1974,41 @@ func (r *rwSymbolTable) Ref(str string) (off uint32, leng uint32) {
return offlen.Off, offlen.Len return offlen.Off, offlen.Len
} }
off, leng = uint32(len(r.symbols)), uint32(len(str)) off, leng = uint32(len(r.symbols)), uint32(len(str))
if int(off) > len(r.symbols) {
panic(1)
}
r.symbols = append(r.symbols, str...) r.symbols = append(r.symbols, str...)
if len(r.symbols) < int(off+leng) {
panic(2)
}
r.symbolsMap[str] = offLenPair{off, leng} r.symbolsMap[str] = offLenPair{off, leng}
return return
} }
// Ref64Packed interns str into the symbol table and returns its packed
// uint64 reference (offset and length packed by packRef64). Repeated calls
// with the same string return the cached reference without re-appending.
func (r *rwSymbolTable) Ref64Packed(str string) uint64 {
	if ref, ok := r.symbolsMap64Packed[str]; ok {
		return ref
	}
	// Both the offset and the length are stored as uint32 inside the packed
	// ref, so refuse (loudly) anything that would wrap around.
	// TODO: validate against the documented wire-format limits instead of
	// panicking with a bare value.
	if len(r.symbols) > math.MaxUint32 || len(str) > math.MaxUint32 || len(str)+len(r.symbols) > math.MaxUint32 {
		panic(1)
	}
	// Compute the ref once and reuse it, instead of storing into the map and
	// immediately looking it up again.
	ref := packRef64(uint32(len(r.symbols)), uint32(len(str)))
	r.symbolsMap64Packed[str] = ref
	r.symbols = append(r.symbols, str...)
	return ref
}
// Ref32Packed interns str into the symbol table and returns its packed
// uint32 reference (offset and length packed by packRef). Repeated calls
// with the same string return the cached reference without re-appending.
//
// NOTE(review): unlike Ref64Packed there is no overflow guard here, even
// though a uint32 ref has far less headroom — packRef's field widths should
// be checked and a guard added; confirm against the documented format.
func (r *rwSymbolTable) Ref32Packed(str string) uint32 {
	if ref, ok := r.symbolsMap32Packed[str]; ok {
		return ref
	}
	// Compute the ref once and reuse it, instead of storing into the map and
	// immediately looking it up again.
	ref := packRef(len(r.symbols), len(str))
	r.symbolsMap32Packed[str] = ref
	r.symbols = append(r.symbols, str...)
	return ref
}
func (r *rwSymbolTable) LabelsString() string { func (r *rwSymbolTable) LabelsString() string {
return *((*string)(unsafe.Pointer(&r.symbols))) return *((*string)(unsafe.Pointer(&r.symbols)))
} }
@ -1770,6 +2017,12 @@ func (r *rwSymbolTable) clear() {
for k := range r.symbolsMap { for k := range r.symbolsMap {
delete(r.symbolsMap, k) delete(r.symbolsMap, k)
} }
for k := range r.symbolsMap64Packed {
delete(r.symbolsMap64Packed, k)
}
for k := range r.symbolsMap32Packed {
delete(r.symbolsMap32Packed, k)
}
r.symbols = r.symbols[:0] r.symbols = r.symbols[:0]
} }
@ -1817,3 +2070,141 @@ func buildMinimizedWriteRequest(samples []prompb.MinimizedTimeSeries, labels str
} }
return compressed, highest, nil return compressed, highest, nil
} }
// buildMinimizedWriteRequestFixed64 marshals samples plus the interned label
// symbols into a snappy-compressed MinimizedWriteRequestFixed64 (no
// metadata). It returns the compressed payload, the highest timestamp seen
// across samples/exemplars/histograms, and any marshaling error. pBuf and
// buf may be nil (convenient for tests); when non-nil they are reused.
func buildMinimizedWriteRequestFixed64(samples []prompb.MinimizedTimeSeriesFixed64, labels string, pBuf *proto.Buffer, buf *[]byte) ([]byte, int64, error) {
	var highest int64
	for _, ts := range samples {
		// At the moment we only ever append a TimeSeries with a single
		// sample or exemplar in it.
		if len(ts.Samples) > 0 && ts.Samples[0].Timestamp > highest {
			highest = ts.Samples[0].Timestamp
		}
		if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp > highest {
			highest = ts.Exemplars[0].Timestamp
		}
		if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp > highest {
			highest = ts.Histograms[0].Timestamp
		}
	}

	req := &prompb.MinimizedWriteRequestFixed64{
		Symbols:    labels,
		Timeseries: samples,
	}

	if pBuf == nil {
		pBuf = proto.NewBuffer(nil) // For convenience in tests. Not efficient.
	} else {
		pBuf.Reset()
	}
	err := pBuf.Marshal(req)
	if err != nil {
		return nil, 0, err
	}

	// snappy uses len() to see if it needs to allocate a new slice. Make the
	// buffer as long as possible.
	if buf != nil {
		*buf = (*buf)[0:cap(*buf)]
	} else {
		buf = &[]byte{}
	}
	// buf is guaranteed non-nil from here on, so no need to re-check it.
	compressed := snappy.Encode(*buf, pBuf.Bytes())
	if n := snappy.MaxEncodedLen(len(pBuf.Bytes())); n > len(*buf) {
		// Grow the buffer for the next call.
		*buf = make([]byte, n)
	}
	return compressed, highest, nil
}
// buildMinimizedWriteRequestFixed32 marshals samples plus the interned label
// symbols into a snappy-compressed MinimizedWriteRequestFixed32 (no
// metadata). It returns the compressed payload, the highest timestamp seen
// across samples/exemplars/histograms, and any marshaling error. pBuf and
// buf may be nil (convenient for tests); when non-nil they are reused.
func buildMinimizedWriteRequestFixed32(samples []prompb.MinimizedTimeSeriesFixed32, labels string, pBuf *proto.Buffer, buf *[]byte) ([]byte, int64, error) {
	var highest int64
	for _, ts := range samples {
		// At the moment we only ever append a TimeSeries with a single
		// sample or exemplar in it.
		if len(ts.Samples) > 0 && ts.Samples[0].Timestamp > highest {
			highest = ts.Samples[0].Timestamp
		}
		if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp > highest {
			highest = ts.Exemplars[0].Timestamp
		}
		if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp > highest {
			highest = ts.Histograms[0].Timestamp
		}
	}

	req := &prompb.MinimizedWriteRequestFixed32{
		Symbols:    labels,
		Timeseries: samples,
	}

	if pBuf == nil {
		pBuf = proto.NewBuffer(nil) // For convenience in tests. Not efficient.
	} else {
		pBuf.Reset()
	}
	err := pBuf.Marshal(req)
	if err != nil {
		return nil, 0, err
	}

	// snappy uses len() to see if it needs to allocate a new slice. Make the
	// buffer as long as possible.
	if buf != nil {
		*buf = (*buf)[0:cap(*buf)]
	} else {
		buf = &[]byte{}
	}
	// buf is guaranteed non-nil from here on, so no need to re-check it.
	compressed := snappy.Encode(*buf, pBuf.Bytes())
	if n := snappy.MaxEncodedLen(len(pBuf.Bytes())); n > len(*buf) {
		// Grow the buffer for the next call.
		*buf = make([]byte, n)
	}
	return compressed, highest, nil
}
// buildMinimizedWriteRequestPacking marshals samples plus the interned label
// symbols into a snappy-compressed MinimizedWriteRequestPacking (no
// metadata). It returns the compressed payload, the highest timestamp seen
// across samples/exemplars/histograms, and any marshaling error. pBuf and
// buf may be nil (convenient for tests); when non-nil they are reused.
func buildMinimizedWriteRequestPacking(samples []prompb.MinimizedTimeSeriesPacking, labels string, pBuf *proto.Buffer, buf *[]byte) ([]byte, int64, error) {
	var highest int64
	for _, ts := range samples {
		// At the moment we only ever append a TimeSeries with a single
		// sample or exemplar in it.
		if len(ts.Samples) > 0 && ts.Samples[0].Timestamp > highest {
			highest = ts.Samples[0].Timestamp
		}
		if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp > highest {
			highest = ts.Exemplars[0].Timestamp
		}
		if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp > highest {
			highest = ts.Histograms[0].Timestamp
		}
	}

	req := &prompb.MinimizedWriteRequestPacking{
		Symbols:    labels,
		Timeseries: samples,
	}

	if pBuf == nil {
		pBuf = proto.NewBuffer(nil) // For convenience in tests. Not efficient.
	} else {
		pBuf.Reset()
	}
	err := pBuf.Marshal(req)
	if err != nil {
		return nil, 0, err
	}

	// snappy uses len() to see if it needs to allocate a new slice. Make the
	// buffer as long as possible.
	if buf != nil {
		*buf = (*buf)[0:cap(*buf)]
	} else {
		buf = &[]byte{}
	}
	// buf is guaranteed non-nil from here on, so no need to re-check it.
	compressed := snappy.Encode(*buf, pBuf.Bytes())
	if n := snappy.MaxEncodedLen(len(pBuf.Bytes())); n > len(*buf) {
		// Grow the buffer for the next call.
		*buf = make([]byte, n)
	}
	return compressed, highest, nil
}

View file

@ -67,19 +67,25 @@ func TestSampleDelivery(t *testing.T) {
exemplars bool exemplars bool
histograms bool histograms bool
floatHistograms bool floatHistograms bool
remoteWrite11 bool rwFormat RemoteWriteFormat
}{ }{
{samples: true, exemplars: false, histograms: false, floatHistograms: false, name: "samples only"}, //{samples: true, exemplars: false, histograms: false, floatHistograms: false, name: "samples only"},
{samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "samples, exemplars, and histograms"}, //{samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "samples, exemplars, and histograms"},
{samples: false, exemplars: true, histograms: false, floatHistograms: false, name: "exemplars only"}, //{samples: false, exemplars: true, histograms: false, floatHistograms: false, name: "exemplars only"},
{samples: false, exemplars: false, histograms: true, floatHistograms: false, name: "histograms only"}, //{samples: false, exemplars: false, histograms: true, floatHistograms: false, name: "histograms only"},
{samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "float histograms only"}, //{samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "float histograms only"},
{remoteWrite11: true, samples: true, exemplars: false, histograms: false, name: "interned samples only"}, {rwFormat: Min32Optimized, samples: true, exemplars: false, histograms: false, name: "interned samples only"},
{remoteWrite11: true, samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "interned samples, exemplars, and histograms"}, //{rwFormat: Min32Optimized, samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "interned samples, exemplars, and histograms"},
{remoteWrite11: true, samples: false, exemplars: true, histograms: false, name: "interned exemplars only"}, //{rwFormat: Min32Optimized, samples: false, exemplars: true, histograms: false, name: "interned exemplars only"},
{remoteWrite11: true, samples: false, exemplars: false, histograms: true, name: "interned histograms only"}, //{rwFormat: Min32Optimized, samples: false, exemplars: false, histograms: true, name: "interned histograms only"},
{remoteWrite11: true, samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "interned float histograms only"}, //{rwFormat: Min32Optimized, samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "interned float histograms only"},
//{rwFormat: Min64Fixed, samples: true, exemplars: false, histograms: false, name: "interned samples only"},
//{rwFormat: Min64Fixed, samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "interned samples, exemplars, and histograms"},
//{rwFormat: Min64Fixed, samples: false, exemplars: true, histograms: false, name: "interned exemplars only"},
//{rwFormat: Min64Fixed, samples: false, exemplars: false, histograms: true, name: "interned histograms only"},
//{rwFormat: Min64Fixed, samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "interned float histograms only"},
} }
// Let's create an even number of send batches so we don't run into the // Let's create an even number of send batches so we don't run into the
@ -106,30 +112,30 @@ func TestSampleDelivery(t *testing.T) {
for _, tc := range testcases { for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, tc.remoteWrite11) s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, tc.rwFormat)
defer s.Close() defer s.Close()
var ( var (
series []record.RefSeries series []record.RefSeries
samples []record.RefSample samples []record.RefSample
exemplars []record.RefExemplar //exemplars []record.RefExemplar
histograms []record.RefHistogramSample //histograms []record.RefHistogramSample
floatHistograms []record.RefFloatHistogramSample //floatHistograms []record.RefFloatHistogramSample
) )
// Generates same series in both cases. // Generates same series in both cases.
if tc.samples { if tc.samples {
samples, series = createTimeseries(n, n) samples, series = createTimeseries(n, n)
} }
if tc.exemplars { //if tc.exemplars {
exemplars, series = createExemplars(n, n) // exemplars, series = createExemplars(n, n)
} //}
if tc.histograms { //if tc.histograms {
histograms, _, series = createHistograms(n, n, false) // histograms, _, series = createHistograms(n, n, false)
} //}
if tc.floatHistograms { //if tc.floatHistograms {
_, floatHistograms, series = createHistograms(n, n, true) // _, floatHistograms, series = createHistograms(n, n, true)
} //}
// Apply new config. // Apply new config.
queueConfig.Capacity = len(samples) queueConfig.Capacity = len(samples)
@ -139,38 +145,38 @@ func TestSampleDelivery(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
qm := s.rws.queues[hash] qm := s.rws.queues[hash]
c := NewTestWriteClient(tc.remoteWrite11) c := NewTestWriteClient(tc.rwFormat)
qm.SetClient(c) qm.SetClient(c)
qm.StoreSeries(series, 0) qm.StoreSeries(series, 0)
// Send first half of data. // Send first half of data.
c.expectSamples(samples[:len(samples)/2], series) c.expectSamples(samples[:len(samples)/2], series)
c.expectExemplars(exemplars[:len(exemplars)/2], series) //c.expectExemplars(exemplars[:len(exemplars)/2], series)
c.expectHistograms(histograms[:len(histograms)/2], series) //c.expectHistograms(histograms[:len(histograms)/2], series)
c.expectFloatHistograms(floatHistograms[:len(floatHistograms)/2], series) //c.expectFloatHistograms(floatHistograms[:len(floatHistograms)/2], series)
qm.Append(samples[:len(samples)/2]) qm.Append(samples[:len(samples)/2])
qm.AppendExemplars(exemplars[:len(exemplars)/2]) //qm.AppendExemplars(exemplars[:len(exemplars)/2])
qm.AppendHistograms(histograms[:len(histograms)/2]) //qm.AppendHistograms(histograms[:len(histograms)/2])
qm.AppendFloatHistograms(floatHistograms[:len(floatHistograms)/2]) //qm.AppendFloatHistograms(floatHistograms[:len(floatHistograms)/2])
c.waitForExpectedData(t) c.waitForExpectedData(t)
// Send second half of data. // Send second half of data.
c.expectSamples(samples[len(samples)/2:], series) c.expectSamples(samples[len(samples)/2:], series)
c.expectExemplars(exemplars[len(exemplars)/2:], series) //c.expectExemplars(exemplars[len(exemplars)/2:], series)
c.expectHistograms(histograms[len(histograms)/2:], series) //c.expectHistograms(histograms[len(histograms)/2:], series)
c.expectFloatHistograms(floatHistograms[len(floatHistograms)/2:], series) //c.expectFloatHistograms(floatHistograms[len(floatHistograms)/2:], series)
qm.Append(samples[len(samples)/2:]) qm.Append(samples[len(samples)/2:])
qm.AppendExemplars(exemplars[len(exemplars)/2:]) //qm.AppendExemplars(exemplars[len(exemplars)/2:])
qm.AppendHistograms(histograms[len(histograms)/2:]) //qm.AppendHistograms(histograms[len(histograms)/2:])
qm.AppendFloatHistograms(floatHistograms[len(floatHistograms)/2:]) //qm.AppendFloatHistograms(floatHistograms[len(floatHistograms)/2:])
c.waitForExpectedData(t) c.waitForExpectedData(t)
}) })
} }
} }
func TestMetadataDelivery(t *testing.T) { func TestMetadataDelivery(t *testing.T) {
c := NewTestWriteClient(false) c := NewTestWriteClient(Base1)
dir := t.TempDir() dir := t.TempDir()
@ -178,7 +184,7 @@ func TestMetadataDelivery(t *testing.T) {
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false) m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, 0)
m.Start() m.Start()
defer m.Stop() defer m.Stop()
@ -204,13 +210,13 @@ func TestMetadataDelivery(t *testing.T) {
} }
func TestSampleDeliveryTimeout(t *testing.T) { func TestSampleDeliveryTimeout(t *testing.T) {
for _, proto := range []string{"1.1", "1.0"} { for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized, Min64Fixed} {
t.Run(proto, func(t *testing.T) { t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
remoteWrite11 := proto == "1.1" //remoteWrite11 := proto == "1.1"
// Let's send one less sample than batch size, and wait the timeout duration // Let's send one less sample than batch size, and wait the timeout duration
n := 9 n := 9
samples, series := createTimeseries(n, n) samples, series := createTimeseries(n, n)
c := NewTestWriteClient(remoteWrite11) c := NewTestWriteClient(rwFormat)
cfg := config.DefaultQueueConfig cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
@ -220,7 +226,7 @@ func TestSampleDeliveryTimeout(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11) m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.StoreSeries(series, 0) m.StoreSeries(series, 0)
m.Start() m.Start()
defer m.Stop() defer m.Stop()
@ -238,9 +244,8 @@ func TestSampleDeliveryTimeout(t *testing.T) {
} }
func TestSampleDeliveryOrder(t *testing.T) { func TestSampleDeliveryOrder(t *testing.T) {
for _, proto := range []string{"1.1", "1.0"} { for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized, Min64Fixed} {
t.Run(proto, func(t *testing.T) { t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
remoteWrite11 := proto == "1.1"
ts := 10 ts := 10
n := config.DefaultQueueConfig.MaxSamplesPerSend * ts n := config.DefaultQueueConfig.MaxSamplesPerSend * ts
samples := make([]record.RefSample, 0, n) samples := make([]record.RefSample, 0, n)
@ -258,7 +263,7 @@ func TestSampleDeliveryOrder(t *testing.T) {
}) })
} }
c := NewTestWriteClient(remoteWrite11) c := NewTestWriteClient(rwFormat)
c.expectSamples(samples, series) c.expectSamples(samples, series)
dir := t.TempDir() dir := t.TempDir()
@ -267,7 +272,7 @@ func TestSampleDeliveryOrder(t *testing.T) {
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11) m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.StoreSeries(series, 0) m.StoreSeries(series, 0)
m.Start() m.Start()
@ -289,7 +294,7 @@ func TestShutdown(t *testing.T) {
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, false) m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
n := 2 * config.DefaultQueueConfig.MaxSamplesPerSend n := 2 * config.DefaultQueueConfig.MaxSamplesPerSend
samples, series := createTimeseries(n, n) samples, series := createTimeseries(n, n)
m.StoreSeries(series, 0) m.StoreSeries(series, 0)
@ -327,7 +332,7 @@ func TestSeriesReset(t *testing.T) {
cfg := config.DefaultQueueConfig cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, false) m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
for i := 0; i < numSegments; i++ { for i := 0; i < numSegments; i++ {
series := []record.RefSeries{} series := []record.RefSeries{}
for j := 0; j < numSeries; j++ { for j := 0; j < numSeries; j++ {
@ -341,15 +346,14 @@ func TestSeriesReset(t *testing.T) {
} }
func TestReshard(t *testing.T) { func TestReshard(t *testing.T) {
for _, proto := range []string{"1.1", "1.0"} { for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized, Min64Fixed} {
t.Run(proto, func(t *testing.T) { t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
remoteWrite11 := proto == "1.1"
size := 10 // Make bigger to find more races. size := 10 // Make bigger to find more races.
nSeries := 6 nSeries := 6
nSamples := config.DefaultQueueConfig.Capacity * size nSamples := config.DefaultQueueConfig.Capacity * size
samples, series := createTimeseries(nSamples, nSeries) samples, series := createTimeseries(nSamples, nSeries)
c := NewTestWriteClient(remoteWrite11) c := NewTestWriteClient(rwFormat)
c.expectSamples(samples, series) c.expectSamples(samples, series)
cfg := config.DefaultQueueConfig cfg := config.DefaultQueueConfig
@ -359,7 +363,7 @@ func TestReshard(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11) m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.StoreSeries(series, 0) m.StoreSeries(series, 0)
m.Start() m.Start()
@ -385,10 +389,9 @@ func TestReshard(t *testing.T) {
} }
func TestReshardRaceWithStop(t *testing.T) { func TestReshardRaceWithStop(t *testing.T) {
for _, proto := range []string{"1.1", "1.0"} { for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized, Min64Fixed} {
t.Run(proto, func(t *testing.T) { t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
remoteWrite11 := proto == "1.1" c := NewTestWriteClient(rwFormat)
c := NewTestWriteClient(remoteWrite11)
var m *QueueManager var m *QueueManager
h := sync.Mutex{} h := sync.Mutex{}
@ -400,7 +403,7 @@ func TestReshardRaceWithStop(t *testing.T) {
go func() { go func() {
for { for {
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m = NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11) m = NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.Start() m.Start()
h.Unlock() h.Unlock()
h.Lock() h.Lock()
@ -425,9 +428,8 @@ func TestReshardRaceWithStop(t *testing.T) {
} }
func TestReshardPartialBatch(t *testing.T) { func TestReshardPartialBatch(t *testing.T) {
for _, proto := range []string{"1.1", "1.0"} { for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized, Min64Fixed} {
t.Run(proto, func(t *testing.T) { t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
remoteWrite11 := proto == "1.1"
samples, series := createTimeseries(1, 10) samples, series := createTimeseries(1, 10)
c := NewTestBlockedWriteClient() c := NewTestBlockedWriteClient()
@ -440,7 +442,7 @@ func TestReshardPartialBatch(t *testing.T) {
cfg.BatchSendDeadline = model.Duration(batchSendDeadline) cfg.BatchSendDeadline = model.Duration(batchSendDeadline)
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11) m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.StoreSeries(series, 0) m.StoreSeries(series, 0)
m.Start() m.Start()
@ -472,9 +474,8 @@ func TestReshardPartialBatch(t *testing.T) {
// where a large scrape (> capacity + max samples per send) is appended at the // where a large scrape (> capacity + max samples per send) is appended at the
// same time as a batch times out according to the batch send deadline. // same time as a batch times out according to the batch send deadline.
func TestQueueFilledDeadlock(t *testing.T) { func TestQueueFilledDeadlock(t *testing.T) {
for _, proto := range []string{"1.1", "1.0"} { for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized, Min64Fixed} {
t.Run(proto, func(t *testing.T) { t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
remoteWrite11 := proto == "1.1"
samples, series := createTimeseries(50, 1) samples, series := createTimeseries(50, 1)
c := NewNopWriteClient() c := NewNopWriteClient()
@ -490,7 +491,7 @@ func TestQueueFilledDeadlock(t *testing.T) {
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11) m := NewQueueManager(metrics, nil, nil, nil, t.TempDir(), newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, flushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.StoreSeries(series, 0) m.StoreSeries(series, 0)
m.Start() m.Start()
defer m.Stop() defer m.Stop()
@ -515,14 +516,14 @@ func TestQueueFilledDeadlock(t *testing.T) {
} }
func TestReleaseNoninternedString(t *testing.T) { func TestReleaseNoninternedString(t *testing.T) {
for _, proto := range []string{"1.1", "1.0"} { for _, rwFormat := range []RemoteWriteFormat{Base1, Min32Optimized, Min64Fixed} {
t.Run(proto, func(t *testing.T) {
remoteWrite11 := proto == "1.1" t.Run(fmt.Sprint(rwFormat), func(t *testing.T) {
cfg := config.DefaultQueueConfig cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
c := NewTestWriteClient(remoteWrite11) c := NewTestWriteClient(rwFormat)
m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, remoteWrite11) m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, rwFormat)
m.Start() m.Start()
defer m.Stop() defer m.Stop()
@ -570,8 +571,9 @@ func TestShouldReshard(t *testing.T) {
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
for _, c := range cases { for _, c := range cases {
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
client := NewTestWriteClient(false) // todo: test with new proto type(s)
m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, client, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false) client := NewTestWriteClient(Base1)
m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, client, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
m.numShards = c.startingShards m.numShards = c.startingShards
m.dataIn.incr(c.samplesIn) m.dataIn.incr(c.samplesIn)
m.dataOut.incr(c.samplesOut) m.dataOut.incr(c.samplesOut)
@ -706,16 +708,16 @@ type TestWriteClient struct {
wg sync.WaitGroup wg sync.WaitGroup
mtx sync.Mutex mtx sync.Mutex
buf []byte buf []byte
expectRemoteWrite11 bool rwFormat RemoteWriteFormat
} }
func NewTestWriteClient(expectRemoteWrite11 bool) *TestWriteClient { func NewTestWriteClient(rwFormat RemoteWriteFormat) *TestWriteClient {
return &TestWriteClient{ return &TestWriteClient{
withWaitGroup: true, withWaitGroup: true,
receivedSamples: map[string][]prompb.Sample{}, receivedSamples: map[string][]prompb.Sample{},
expectedSamples: map[string][]prompb.Sample{}, expectedSamples: map[string][]prompb.Sample{},
receivedMetadata: map[string][]prompb.MetricMetadata{}, receivedMetadata: map[string][]prompb.MetricMetadata{},
expectRemoteWrite11: expectRemoteWrite11, rwFormat: rwFormat,
} }
} }
@ -803,6 +805,7 @@ func (c *TestWriteClient) waitForExpectedData(tb testing.TB) {
c.mtx.Lock() c.mtx.Lock()
defer c.mtx.Unlock() defer c.mtx.Unlock()
for ts, expectedSamples := range c.expectedSamples { for ts, expectedSamples := range c.expectedSamples {
require.Equal(tb, expectedSamples, c.receivedSamples[ts], ts) require.Equal(tb, expectedSamples, c.receivedSamples[ts], ts)
} }
@ -831,25 +834,33 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
} }
var reqProto *prompb.WriteRequest var reqProto *prompb.WriteRequest
if c.expectRemoteWrite11 { switch c.rwFormat {
var reqReduced prompb.MinimizedWriteRequest case Base1:
err = proto.Unmarshal(reqBuf, &reqReduced)
if err == nil {
reqProto, err = MinimizedWriteRequestToWriteRequest(&reqReduced)
}
} else {
reqProto = &prompb.WriteRequest{} reqProto = &prompb.WriteRequest{}
err = proto.Unmarshal(reqBuf, reqProto) err = proto.Unmarshal(reqBuf, reqProto)
case Min32Optimized:
var reqMin prompb.MinimizedWriteRequest
err = proto.Unmarshal(reqBuf, &reqMin)
if err == nil {
reqProto, err = MinimizedWriteRequestToWriteRequest(&reqMin)
}
case Min64Fixed:
var reqMin64 prompb.MinimizedWriteRequestFixed64
err = proto.Unmarshal(reqBuf, &reqMin64)
if err == nil {
reqProto, err = min64WriteRequestToWriteRequest(&reqMin64)
}
} }
if err != nil { if err != nil {
fmt.Println("error: ", err)
return err return err
} }
count := 0 count := 0
for _, ts := range reqProto.Timeseries { for _, ts := range reqProto.Timeseries {
labels := labelProtosToLabels(ts.Labels) ls := labelProtosToLabels(ts.Labels)
seriesName := labels.Get("__name__") seriesName := ls.Get("__name__")
for _, sample := range ts.Samples { for _, sample := range ts.Samples {
count++ count++
c.receivedSamples[seriesName] = append(c.receivedSamples[seriesName], sample) c.receivedSamples[seriesName] = append(c.receivedSamples[seriesName], sample)
@ -860,12 +871,12 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
c.receivedExemplars[seriesName] = append(c.receivedExemplars[seriesName], ex) c.receivedExemplars[seriesName] = append(c.receivedExemplars[seriesName], ex)
} }
for _, histogram := range ts.Histograms { for _, hist := range ts.Histograms {
count++ count++
if histogram.IsFloatHistogram() { if hist.IsFloatHistogram() {
c.receivedFloatHistograms[seriesName] = append(c.receivedFloatHistograms[seriesName], histogram) c.receivedFloatHistograms[seriesName] = append(c.receivedFloatHistograms[seriesName], hist)
} else { } else {
c.receivedHistograms[seriesName] = append(c.receivedHistograms[seriesName], histogram) c.receivedHistograms[seriesName] = append(c.receivedHistograms[seriesName], hist)
} }
} }
@ -965,7 +976,8 @@ func BenchmarkSampleSend(b *testing.B) {
dir := b.TempDir() dir := b.TempDir()
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false) // todo: test with new proto type(s)
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
m.StoreSeries(series, 0) m.StoreSeries(series, 0)
// These should be received by the client. // These should be received by the client.
@ -1009,9 +1021,10 @@ func BenchmarkStartup(b *testing.B) {
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
c := NewTestBlockedWriteClient() c := NewTestBlockedWriteClient()
// todo: test with new proto type(s)
m := NewQueueManager(metrics, nil, nil, logger, dir, m := NewQueueManager(metrics, nil, nil, logger, dir,
newEWMARate(ewmaWeight, shardUpdateDuration), newEWMARate(ewmaWeight, shardUpdateDuration),
cfg, mcfg, labels.EmptyLabels(), nil, c, 1*time.Minute, newPool(), newHighestTimestampMetric(), nil, false, false, false) cfg, mcfg, labels.EmptyLabels(), nil, c, 1*time.Minute, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
m.watcher.SetStartTime(timestamp.Time(math.MaxInt64)) m.watcher.SetStartTime(timestamp.Time(math.MaxInt64))
m.watcher.MaxSegment = segments[len(segments)-2] m.watcher.MaxSegment = segments[len(segments)-2]
err := m.watcher.Run() err := m.watcher.Run()
@ -1094,7 +1107,8 @@ func TestCalculateDesiredShards(t *testing.T) {
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration) samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration)
m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false) // todo: test with new proto type(s)
m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
// Need to start the queue manager so the proper metrics are initialized. // Need to start the queue manager so the proper metrics are initialized.
// However we can stop it right away since we don't need to do any actual // However we can stop it right away since we don't need to do any actual
@ -1163,7 +1177,7 @@ func TestCalculateDesiredShards(t *testing.T) {
} }
func TestCalculateDesiredShardsDetail(t *testing.T) { func TestCalculateDesiredShardsDetail(t *testing.T) {
c := NewTestWriteClient(false) c := NewTestWriteClient(Base1)
cfg := config.DefaultQueueConfig cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
@ -1171,7 +1185,8 @@ func TestCalculateDesiredShardsDetail(t *testing.T) {
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration) samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration)
m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, false) // todo: test with new proto type(s)
m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, Base1)
for _, tc := range []struct { for _, tc := range []struct {
name string name string
@ -1531,3 +1546,138 @@ func BenchmarkBuildMinimizedWriteRequest(b *testing.B) {
}) })
} }
} }
// BenchmarkBuildMinimizedWriteRequestFixed32 measures the time and compressed
// output size of buildMinimizedWriteRequestFixed32 for batches of 2, 10 and
// 100 dummy series. The per-op compressed size is reported as a custom metric.
func BenchmarkBuildMinimizedWriteRequestFixed32(b *testing.B) {
	type testcase struct {
		batch []timeSeries
	}
	testCases := []testcase{
		{createDummyTimeSeries(2)},
		{createDummyTimeSeries(10)},
		{createDummyTimeSeries(100)},
	}
	for _, tc := range testCases {
		symbolTable := newRwSymbolTable()
		pBuf := proto.NewBuffer(nil)
		buff := make([]byte, 0)
		seriesBuff := make([]prompb.MinimizedTimeSeriesFixed32, len(tc.batch))
		for i := range seriesBuff {
			// Pre-allocate one sample slot per series so population can reuse it.
			// todo: add other types (exemplars, histograms).
			seriesBuff[i].Samples = []prompb.Sample{{}}
		}

		// Warm up the shared buffers so the timed loop measures steady state.
		for i := 0; i < 10; i++ {
			populateMinimizedTimeSeriesFixed32(&symbolTable, tc.batch, seriesBuff, true, true)
			buildMinimizedWriteRequestFixed32(seriesBuff, symbolTable.LabelsString(), pBuf, &buff)
		}

		b.Run(fmt.Sprintf("%d-instances", len(tc.batch)), func(b *testing.B) {
			totalSize := 0
			// Reset once before the loop; calling ResetTimer inside the loop
			// would throw away all but the last iteration's timing.
			b.ResetTimer()
			for j := 0; j < b.N; j++ {
				populateMinimizedTimeSeriesFixed32(&symbolTable, tc.batch, seriesBuff, true, true)
				req, _, err := buildMinimizedWriteRequestFixed32(seriesBuff, symbolTable.LabelsString(), pBuf, &buff)
				if err != nil {
					b.Fatal(err)
				}
				symbolTable.clear()
				totalSize += len(req)
			}
			// Report the average compressed size once, after the loop.
			b.ReportMetric(float64(totalSize)/float64(b.N), "compressedSize/op")
		})
	}
}
// BenchmarkBuildMinimizedWriteRequestFixed64 measures the time and compressed
// output size of buildMinimizedWriteRequestFixed64 for batches of 2, 10 and
// 100 dummy series. The per-op compressed size is reported as a custom metric.
func BenchmarkBuildMinimizedWriteRequestFixed64(b *testing.B) {
	type testcase struct {
		batch []timeSeries
	}
	testCases := []testcase{
		{createDummyTimeSeries(2)},
		{createDummyTimeSeries(10)},
		{createDummyTimeSeries(100)},
	}
	for _, tc := range testCases {
		symbolTable := newRwSymbolTable()
		pBuf := proto.NewBuffer(nil)
		buff := make([]byte, 0)
		seriesBuff := make([]prompb.MinimizedTimeSeriesFixed64, len(tc.batch))
		for i := range seriesBuff {
			// Pre-allocate one sample slot per series so population can reuse it.
			// todo: add other types (exemplars, histograms).
			seriesBuff[i].Samples = []prompb.Sample{{}}
		}

		// Warm up the shared buffers so the timed loop measures steady state.
		for i := 0; i < 10; i++ {
			populateMinimizedTimeSeriesFixed64(&symbolTable, tc.batch, seriesBuff, true, true)
			buildMinimizedWriteRequestFixed64(seriesBuff, symbolTable.LabelsString(), pBuf, &buff)
		}

		b.Run(fmt.Sprintf("%d-instances", len(tc.batch)), func(b *testing.B) {
			totalSize := 0
			// Reset once before the loop; calling ResetTimer inside the loop
			// would throw away all but the last iteration's timing.
			b.ResetTimer()
			for j := 0; j < b.N; j++ {
				populateMinimizedTimeSeriesFixed64(&symbolTable, tc.batch, seriesBuff, true, true)
				req, _, err := buildMinimizedWriteRequestFixed64(seriesBuff, symbolTable.LabelsString(), pBuf, &buff)
				if err != nil {
					b.Fatal(err)
				}
				symbolTable.clear()
				totalSize += len(req)
			}
			// Report the average compressed size once, after the loop.
			b.ReportMetric(float64(totalSize)/float64(b.N), "compressedSize/op")
		})
	}
}
// BenchmarkBuildMinimizedWriteRequestPacking measures the time and compressed
// output size of buildMinimizedWriteRequestPacking for batches of 2, 10 and
// 100 dummy series. The per-op compressed size is reported as a custom metric.
func BenchmarkBuildMinimizedWriteRequestPacking(b *testing.B) {
	type testcase struct {
		batch []timeSeries
	}
	testCases := []testcase{
		{createDummyTimeSeries(2)},
		{createDummyTimeSeries(10)},
		{createDummyTimeSeries(100)},
	}
	for _, tc := range testCases {
		symbolTable := newRwSymbolTable()
		pBuf := proto.NewBuffer(nil)
		buff := make([]byte, 0)
		seriesBuff := make([]prompb.MinimizedTimeSeriesPacking, len(tc.batch))
		for i := range seriesBuff {
			// Pre-allocate one sample slot per series so population can reuse it.
			// todo: add other types (exemplars, histograms).
			seriesBuff[i].Samples = []prompb.Sample{{}}
		}

		// Warm up the shared buffers so the timed loop measures steady state.
		for i := 0; i < 10; i++ {
			populateMinimizedTimeSeriesPacking(&symbolTable, tc.batch, seriesBuff, true, true)
			buildMinimizedWriteRequestPacking(seriesBuff, symbolTable.LabelsString(), pBuf, &buff)
		}

		b.Run(fmt.Sprintf("%d-instances", len(tc.batch)), func(b *testing.B) {
			totalSize := 0
			// Reset once before the loop; calling ResetTimer inside the loop
			// would throw away all but the last iteration's timing.
			b.ResetTimer()
			for j := 0; j < b.N; j++ {
				populateMinimizedTimeSeriesPacking(&symbolTable, tc.batch, seriesBuff, true, true)
				req, _, err := buildMinimizedWriteRequestPacking(seriesBuff, symbolTable.LabelsString(), pBuf, &buff)
				if err != nil {
					b.Fatal(err)
				}
				symbolTable.clear()
				totalSize += len(req)
			}
			// Report the average compressed size once, after the loop.
			b.ReportMetric(float64(totalSize)/float64(b.N), "compressedSize/op")
		})
	}
}

View file

@ -91,7 +91,8 @@ func TestNoDuplicateReadConfigs(t *testing.T) {
for _, tc := range cases { for _, tc := range cases {
t.Run("", func(t *testing.T) { t.Run("", func(t *testing.T) {
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false) // todo: test with new format type(s)?
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{ conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig, GlobalConfig: config.DefaultGlobalConfig,
RemoteReadConfigs: tc.cfgs, RemoteReadConfigs: tc.cfgs,

View file

@ -62,7 +62,7 @@ type Storage struct {
} }
// NewStorage returns a remote.Storage. // NewStorage returns a remote.Storage.
func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, remoteWrite11 bool) *Storage { func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, rwFormat RemoteWriteFormat) *Storage {
if l == nil { if l == nil {
l = log.NewNopLogger() l = log.NewNopLogger()
} }
@ -72,7 +72,7 @@ func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCal
logger: logger, logger: logger,
localStartTimeCallback: stCallback, localStartTimeCallback: stCallback,
} }
s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, remoteWrite11) s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, rwFormat)
return s return s
} }

View file

@ -29,7 +29,8 @@ import (
func TestStorageLifecycle(t *testing.T) { func TestStorageLifecycle(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false) // todo: test with new format type(s)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{ conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig, GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: []*config.RemoteWriteConfig{ RemoteWriteConfigs: []*config.RemoteWriteConfig{
@ -56,7 +57,8 @@ func TestStorageLifecycle(t *testing.T) {
func TestUpdateRemoteReadConfigs(t *testing.T) { func TestUpdateRemoteReadConfigs(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false) // todo: test with new format type(s)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{ conf := &config.Config{
GlobalConfig: config.GlobalConfig{}, GlobalConfig: config.GlobalConfig{},
@ -77,7 +79,8 @@ func TestUpdateRemoteReadConfigs(t *testing.T) {
func TestFilterExternalLabels(t *testing.T) { func TestFilterExternalLabels(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false) // todo: test with new format type(s)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{ conf := &config.Config{
GlobalConfig: config.GlobalConfig{ GlobalConfig: config.GlobalConfig{
@ -102,7 +105,8 @@ func TestFilterExternalLabels(t *testing.T) {
func TestIgnoreExternalLabels(t *testing.T) { func TestIgnoreExternalLabels(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false) // todo: test with new format type(s)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{ conf := &config.Config{
GlobalConfig: config.GlobalConfig{ GlobalConfig: config.GlobalConfig{

View file

@ -65,7 +65,7 @@ type WriteStorage struct {
externalLabels labels.Labels externalLabels labels.Labels
dir string dir string
queues map[string]*QueueManager queues map[string]*QueueManager
remoteWrite11 bool rwFormat RemoteWriteFormat
samplesIn *ewmaRate samplesIn *ewmaRate
flushDeadline time.Duration flushDeadline time.Duration
interner *pool interner *pool
@ -77,13 +77,13 @@ type WriteStorage struct {
} }
// NewWriteStorage creates and runs a WriteStorage. // NewWriteStorage creates and runs a WriteStorage.
func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, remoteWrite11 bool) *WriteStorage { func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, rwFormat RemoteWriteFormat) *WriteStorage {
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = log.NewNopLogger()
} }
rws := &WriteStorage{ rws := &WriteStorage{
queues: make(map[string]*QueueManager), queues: make(map[string]*QueueManager),
remoteWrite11: remoteWrite11, rwFormat: rwFormat,
watcherMetrics: wlog.NewWatcherMetrics(reg), watcherMetrics: wlog.NewWatcherMetrics(reg),
liveReaderMetrics: wlog.NewLiveReaderMetrics(reg), liveReaderMetrics: wlog.NewLiveReaderMetrics(reg),
logger: logger, logger: logger,
@ -166,14 +166,14 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
} }
c, err := NewWriteClient(name, &ClientConfig{ c, err := NewWriteClient(name, &ClientConfig{
URL: rwConf.URL, URL: rwConf.URL,
RemoteWrite11: rws.remoteWrite11, RemoteWriteFormat: rws.rwFormat,
Timeout: rwConf.RemoteTimeout, Timeout: rwConf.RemoteTimeout,
HTTPClientConfig: rwConf.HTTPClientConfig, HTTPClientConfig: rwConf.HTTPClientConfig,
SigV4Config: rwConf.SigV4Config, SigV4Config: rwConf.SigV4Config,
AzureADConfig: rwConf.AzureADConfig, AzureADConfig: rwConf.AzureADConfig,
Headers: rwConf.Headers, Headers: rwConf.Headers,
RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit, RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit,
}) })
if err != nil { if err != nil {
return err return err
@ -210,7 +210,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
rws.scraper, rws.scraper,
rwConf.SendExemplars, rwConf.SendExemplars,
rwConf.SendNativeHistograms, rwConf.SendNativeHistograms,
rws.remoteWrite11, rws.rwFormat,
) )
// Keep track of which queues are new so we know which to start. // Keep track of which queues are new so we know which to start.
newHashes = append(newHashes, hash) newHashes = append(newHashes, hash)

View file

@ -46,17 +46,17 @@ type writeHandler struct {
// Experimental feature, new remote write proto format // Experimental feature, new remote write proto format
// The handler will accept the new format, but it can still accept the old one // The handler will accept the new format, but it can still accept the old one
enableRemoteWrite11 bool // TODO: this should eventually be via content negotiation
rwFormat RemoteWriteFormat
} }
// NewWriteHandler creates a http.Handler that accepts remote write requests and // NewWriteHandler creates a http.Handler that accepts remote write requests and
// writes them to the provided appendable. // writes them to the provided appendable.
func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, enableRemoteWrite11 bool) http.Handler { func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, rwFormat RemoteWriteFormat) http.Handler {
h := &writeHandler{ h := &writeHandler{
logger: logger, logger: logger,
appendable: appendable, appendable: appendable,
enableRemoteWrite11: enableRemoteWrite11, rwFormat: rwFormat,
samplesWithInvalidLabelsTotal: prometheus.NewCounter(prometheus.CounterOpts{ samplesWithInvalidLabelsTotal: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "prometheus", Namespace: "prometheus",
Subsystem: "api", Subsystem: "api",
@ -74,11 +74,19 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var err error var err error
var req *prompb.WriteRequest var req *prompb.WriteRequest
var reqMin *prompb.MinimizedWriteRequest var reqMin *prompb.MinimizedWriteRequest
var reqMin64Fixed *prompb.MinimizedWriteRequestFixed64
var reqMin32Fixed *prompb.MinimizedWriteRequestFixed32
if h.enableRemoteWrite11 && r.Header.Get(RemoteWriteVersionHeader) == RemoteWriteVersion11HeaderValue { // TODO: this should eventually be done via content negotiation/looking at the header
reqMin, err = DecodeMinimizedWriteRequest(r.Body) switch h.rwFormat {
} else { case Base1:
req, err = DecodeWriteRequest(r.Body) req, err = DecodeWriteRequest(r.Body)
case Min32Optimized:
reqMin, err = DecodeMinimizedWriteRequest(r.Body)
case Min64Fixed:
reqMin64Fixed, err = DecodeMinimizedWriteRequestFixed64(r.Body)
case Min32Fixed:
reqMin32Fixed, err = DecodeMinimizedWriteRequestFixed32(r.Body)
} }
if err != nil { if err != nil {
@ -87,10 +95,16 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return return
} }
if h.enableRemoteWrite11 && r.Header.Get(RemoteWriteVersionHeader) == RemoteWriteVersion11HeaderValue { // TODO: this should eventually be done detecting the format version above
err = h.writeMin(r.Context(), reqMin) switch h.rwFormat {
} else { case Base1:
err = h.write(r.Context(), req) err = h.write(r.Context(), req)
case Min32Optimized:
err = h.writeMin(r.Context(), reqMin)
case Min64Fixed:
err = h.writeMin64(r.Context(), reqMin64Fixed)
case Min32Fixed:
err = h.writeMin32(r.Context(), reqMin32Fixed)
} }
switch { switch {
@ -323,3 +337,81 @@ func (h *writeHandler) writeMin(ctx context.Context, req *prompb.MinimizedWriteR
return nil return nil
} }
// writeMin64 ingests a MinimizedWriteRequestFixed64: for each series it
// resolves the fixed-64 label references against the request's symbol table,
// then appends samples, exemplars and histograms through a single appender.
// The appender is committed only if every append succeeded; otherwise it is
// rolled back and the first error is returned.
func (h *writeHandler) writeMin64(ctx context.Context, req *prompb.MinimizedWriteRequestFixed64) (err error) {
	outOfOrderExemplarErrs := 0

	app := h.appendable.Appender(ctx)
	// Commit on clean completion; undo everything on any error.
	defer func() {
		if err == nil {
			err = app.Commit()
			return
		}
		_ = app.Rollback()
	}()

	for _, series := range req.Timeseries {
		ls := Uint64RefToLabels(req.Symbols, series.LabelSymbols)

		if err := h.appendSamples(app, series.Samples, ls); err != nil {
			return err
		}
		for _, protoExemplar := range series.Exemplars {
			// Out-of-order exemplars are counted and logged below, not fatal.
			h.appendExemplar(app, exemplarProtoToExemplar(protoExemplar), ls, &outOfOrderExemplarErrs)
		}
		if err := h.appendHistograms(app, series.Histograms, ls); err != nil {
			return err
		}
	}

	if outOfOrderExemplarErrs > 0 {
		_ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs)
	}
	return nil
}
// writeMin32 ingests a minimized remote-write request that references label
// symbols via packed fixed32 offsets. All samples, exemplars and histograms
// are pushed through a single appender, which is committed on success and
// rolled back if any append fails.
func (h *writeHandler) writeMin32(ctx context.Context, req *prompb.MinimizedWriteRequestFixed32) (err error) {
	oooExemplarCount := 0

	app := h.appendable.Appender(ctx)
	// Commit only when every series appended cleanly; otherwise roll back.
	defer func() {
		if err != nil {
			_ = app.Rollback()
			return
		}
		err = app.Commit()
	}()

	for _, series := range req.Timeseries {
		// Resolve the interned symbol references back into a label set.
		lbls := Uint32RefToLabels(req.Symbols, series.LabelSymbols)

		if err := h.appendSamples(app, series.Samples, lbls); err != nil {
			return err
		}

		// Out-of-order exemplars are tolerated; they are only counted here so
		// that a single summary warning can be emitted below.
		for _, protoExemplar := range series.Exemplars {
			h.appendExemplar(app, exemplarProtoToExemplar(protoExemplar), lbls, &oooExemplarCount)
		}

		if err := h.appendHistograms(app, series.Histograms, lbls); err != nil {
			return err
		}
	}

	if oooExemplarCount > 0 {
		_ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", oooExemplarCount)
	}

	return nil
}

View file

@ -45,7 +45,8 @@ func TestRemoteWriteHandler(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
appendable := &mockAppendable{} appendable := &mockAppendable{}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false) // TODO: test with other proto format(s)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder() recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req) handler.ServeHTTP(recorder, req)
@ -92,7 +93,8 @@ func TestRemoteWriteHandlerMinimizedFormat(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
appendable := &mockAppendable{} appendable := &mockAppendable{}
handler := NewWriteHandler(nil, nil, appendable, true) // TODO: test with other proto format(s)
handler := NewWriteHandler(nil, nil, appendable, Min32Optimized)
recorder := httptest.NewRecorder() recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req) handler.ServeHTTP(recorder, req)
@ -106,25 +108,124 @@ func TestRemoteWriteHandlerMinimizedFormat(t *testing.T) {
// the reduced write request is equivalent to the write request fixture. // the reduced write request is equivalent to the write request fixture.
// we can use it for // we can use it for
for _, ts := range writeRequestFixture.Timeseries { for _, ts := range writeRequestFixture.Timeseries {
ls := labelProtosToLabels(ts.Labels) labels := labelProtosToLabels(ts.Labels)
for _, s := range ts.Samples { for _, s := range ts.Samples {
require.Equal(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i]) require.Equal(t, mockSample{labels, s.Timestamp, s.Value}, appendable.samples[i])
i++ i++
} }
for _, e := range ts.Exemplars { for _, e := range ts.Exemplars {
exemplarLabels := labelProtosToLabels(e.Labels) exemplarLabels := labelProtosToLabels(e.Labels)
require.Equal(t, mockExemplar{ls, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j]) require.Equal(t, mockExemplar{labels, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
j++ j++
} }
for _, hp := range ts.Histograms { for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() { if hp.IsFloatHistogram() {
fh := FloatHistogramProtoToFloatHistogram(hp) fh := FloatHistogramProtoToFloatHistogram(hp)
require.Equal(t, mockHistogram{ls, hp.Timestamp, nil, fh}, appendable.histograms[k]) require.Equal(t, mockHistogram{labels, hp.Timestamp, nil, fh}, appendable.histograms[k])
} else { } else {
h := HistogramProtoToHistogram(hp) h := HistogramProtoToHistogram(hp)
require.Equal(t, mockHistogram{ls, hp.Timestamp, h, nil}, appendable.histograms[k]) require.Equal(t, mockHistogram{labels, hp.Timestamp, h, nil}, appendable.histograms[k])
}
k++
}
}
}
//func TestRemoteWriteHandlerMinimizedFormat(t *testing.T) {
// buf, _, err := buildMinimizedWriteRequest(writeRequestMinimizedFixture.Timeseries, writeRequestMinimizedFixture.Symbols, nil, nil)
// require.NoError(t, err)
//
// req, err := http.NewRequest("", "", bytes.NewReader(buf))
// req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion11HeaderValue)
// require.NoError(t, err)
//
// appendable := &mockAppendable{}
// handler := NewWriteHandler(nil, nil, appendable, false, true)
//
// recorder := httptest.NewRecorder()
// handler.ServeHTTP(recorder, req)
//
// resp := recorder.Result()
// require.Equal(t, http.StatusNoContent, resp.StatusCode)
//
// i := 0
// j := 0
// k := 0
// // the reduced write request is equivalent to the write request fixture.
// // we can use it for
// for _, ts := range writeRequestFixture.Timeseries {
// ls := labelProtosToLabels(ts.Labels)
// for _, s := range ts.Samples {
// require.Equal(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i])
// i++
// }
//
// for _, e := range ts.Exemplars {
// exemplarLabels := labelProtosToLabels(e.Labels)
// require.Equal(t, mockExemplar{ls, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
// j++
// }
//
// for _, hp := range ts.Histograms {
// if hp.IsFloatHistogram() {
// fh := FloatHistogramProtoToFloatHistogram(hp)
// require.Equal(t, mockHistogram{ls, hp.Timestamp, nil, fh}, appendable.histograms[k])
// } else {
// h := HistogramProtoToHistogram(hp)
// require.Equal(t, mockHistogram{ls, hp.Timestamp, h, nil}, appendable.histograms[k])
// }
//
// k++
// }
// }
//}
func TestRemoteWriteHandler64Packed(t *testing.T) {
buf, _, err := buildMinimizedWriteRequestFixed64(writeRequestMinimized64Fixture.Timeseries, writeRequestMinimized64Fixture.Symbols, nil, nil)
require.NoError(t, err)
req, err := http.NewRequest("", "", bytes.NewReader(buf))
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion11HeaderValue)
require.NoError(t, err)
appendable := &mockAppendable{}
// TODO: test with other proto format(s)
handler := NewWriteHandler(nil, nil, appendable, Min64Fixed)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
resp := recorder.Result()
require.Equal(t, http.StatusNoContent, resp.StatusCode)
i := 0
j := 0
k := 0
// the reduced write request is equivalent to the write request fixture.
// we can use it for
for _, ts := range writeRequestMinimized64Fixture.Timeseries {
labels := Uint64RefToLabels(writeRequestMinimized64Fixture.Symbols, ts.LabelSymbols)
for _, s := range ts.Samples {
require.Equal(t, mockSample{labels, s.Timestamp, s.Value}, appendable.samples[i])
i++
}
for _, e := range ts.Exemplars {
exemplarLabels := labelProtosToLabels(e.Labels)
require.Equal(t, mockExemplar{labels, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
j++
}
for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() {
fh := FloatHistogramProtoToFloatHistogram(hp)
require.Equal(t, mockHistogram{labels, hp.Timestamp, nil, fh}, appendable.histograms[k])
} else {
h := HistogramProtoToHistogram(hp)
require.Equal(t, mockHistogram{labels, hp.Timestamp, h, nil}, appendable.histograms[k])
} }
k++ k++
@ -145,7 +246,8 @@ func TestOutOfOrderSample(t *testing.T) {
appendable := &mockAppendable{ appendable := &mockAppendable{
latestSample: 100, latestSample: 100,
} }
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false) // TODO: test with other proto format(s)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder() recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req) handler.ServeHTTP(recorder, req)
@ -170,7 +272,8 @@ func TestOutOfOrderExemplar(t *testing.T) {
appendable := &mockAppendable{ appendable := &mockAppendable{
latestExemplar: 100, latestExemplar: 100,
} }
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false) // TODO: test with other proto format(s)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder() recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req) handler.ServeHTTP(recorder, req)
@ -193,8 +296,8 @@ func TestOutOfOrderHistogram(t *testing.T) {
appendable := &mockAppendable{ appendable := &mockAppendable{
latestHistogram: 100, latestHistogram: 100,
} }
// TODO: test with other proto format(s)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false) handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder() recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req) handler.ServeHTTP(recorder, req)
@ -222,7 +325,8 @@ func BenchmarkRemoteWritehandler(b *testing.B) {
} }
appendable := &mockAppendable{} appendable := &mockAppendable{}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false) // TODO: test with other proto format(s)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder() recorder := httptest.NewRecorder()
b.ResetTimer() b.ResetTimer()
@ -252,15 +356,31 @@ func BenchmarkRemoteWritehandler(b *testing.B) {
// reqs = append(reqs, req) // reqs = append(reqs, req)
// } // }
// appendable := &mockAppendable{} // TODO(npazosmendez): add benchmarks with realistic scenarios
// handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, true, false) //func BenchmarkMin64RemoteWriteHandler(b *testing.B) {
// recorder := httptest.NewRecorder() // const labelValue = "abcdefg'hijlmn234!@#$%^&*()_+~`\"{}[],./<>?hello0123hiOlá你好Dzieńdobry9Zd8ra765v4stvuyte"
// reqs := []*http.Request{}
// b.ResetTimer() // for i := 0; i < b.N; i++ {
// for _, req := range reqs { // rw := newRwSymbolTable()
// handler.ServeHTTP(recorder, req) // num := strings.Repeat(strconv.Itoa(i), 16)
// } // buf, _, err := buildMinimizedWriteRequestFixed64([]prompb.MinimizedTimeSeriesFixed64{{
// } // LabelSymbols: []uint64{
// rw.Ref64Packed("__name__"), rw.Ref64Packed("test_metric"),
// rw.Ref64Packed("test_label_name_" + num), rw.Ref64Packed(labelValue + num),
// },
// Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram)},
// }}, rw.LabelsString(), nil, nil)
// require.NoError(b, err)
// req, err := http.NewRequest("", "", bytes.NewReader(buf))
// require.NoError(b, err)
// req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion11HeaderValue)
// reqs = append(reqs, req)
// }
//
// appendable := &mockAppendable{}
// // TODO: test with other proto format(s)
// handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
// recorder := httptest.NewRecorder()
func TestCommitErr(t *testing.T) { func TestCommitErr(t *testing.T) {
buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil, nil) buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil, nil)
@ -272,7 +392,8 @@ func TestCommitErr(t *testing.T) {
appendable := &mockAppendable{ appendable := &mockAppendable{
commitErr: fmt.Errorf("commit error"), commitErr: fmt.Errorf("commit error"),
} }
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, false) // TODO: test with other proto format(s)
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, Base1)
recorder := httptest.NewRecorder() recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req) handler.ServeHTTP(recorder, req)
@ -297,8 +418,8 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) {
b.Cleanup(func() { b.Cleanup(func() {
require.NoError(b, db.Close()) require.NoError(b, db.Close())
}) })
// TODO: test with other proto format(s)
handler := NewWriteHandler(log.NewNopLogger(), nil, db.Head(), false) handler := NewWriteHandler(log.NewNopLogger(), nil, db.Head(), Base1)
buf, _, err := buildWriteRequest(genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil) buf, _, err := buildWriteRequest(genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil)
require.NoError(b, err) require.NoError(b, err)

View file

@ -117,7 +117,8 @@ func TestNoDuplicateWriteConfigs(t *testing.T) {
} }
for _, tc := range cases { for _, tc := range cases {
s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false) // todo: test with new format type(s)
s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, Base1)
conf := &config.Config{ conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig, GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: tc.cfgs, RemoteWriteConfigs: tc.cfgs,
@ -139,7 +140,8 @@ func TestRestartOnNameChange(t *testing.T) {
hash, err := toHash(cfg) hash, err := toHash(cfg)
require.NoError(t, err) require.NoError(t, err)
s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false) // todo: test with new format type(s)
s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, Base1)
conf := &config.Config{ conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig, GlobalConfig: config.DefaultGlobalConfig,
@ -164,7 +166,8 @@ func TestRestartOnNameChange(t *testing.T) {
func TestUpdateWithRegisterer(t *testing.T) { func TestUpdateWithRegisterer(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Millisecond, nil, false) // todo: test with new format type(s)
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Millisecond, nil, Base1)
c1 := &config.RemoteWriteConfig{ c1 := &config.RemoteWriteConfig{
Name: "named", Name: "named",
URL: &common_config.URL{ URL: &common_config.URL{
@ -204,7 +207,8 @@ func TestUpdateWithRegisterer(t *testing.T) {
func TestWriteStorageLifecycle(t *testing.T) { func TestWriteStorageLifecycle(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false) // todo: test with new format type(s)
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{ conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig, GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: []*config.RemoteWriteConfig{ RemoteWriteConfigs: []*config.RemoteWriteConfig{
@ -221,7 +225,8 @@ func TestWriteStorageLifecycle(t *testing.T) {
func TestUpdateExternalLabels(t *testing.T) { func TestUpdateExternalLabels(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Second, nil, false) // todo: test with new format type(s)
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Second, nil, Base1)
externalLabels := labels.FromStrings("external", "true") externalLabels := labels.FromStrings("external", "true")
conf := &config.Config{ conf := &config.Config{
@ -250,8 +255,8 @@ func TestUpdateExternalLabels(t *testing.T) {
func TestWriteStorageApplyConfigsIdempotent(t *testing.T) { func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false) // todo: test with new format type(s)
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, Base1)
conf := &config.Config{ conf := &config.Config{
GlobalConfig: config.GlobalConfig{}, GlobalConfig: config.GlobalConfig{},
RemoteWriteConfigs: []*config.RemoteWriteConfig{ RemoteWriteConfigs: []*config.RemoteWriteConfig{
@ -276,7 +281,8 @@ func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) { func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false) // todo: test with new format type(s)
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, Base1)
c0 := &config.RemoteWriteConfig{ c0 := &config.RemoteWriteConfig{
RemoteTimeout: model.Duration(10 * time.Second), RemoteTimeout: model.Duration(10 * time.Second),

View file

@ -88,7 +88,7 @@ func createTestAgentDB(t *testing.T, reg prometheus.Registerer, opts *Options) *
t.Helper() t.Helper()
dbDir := t.TempDir() dbDir := t.TempDir()
rs := remote.NewStorage(log.NewNopLogger(), reg, startTime, dbDir, time.Second*30, nil, false) rs := remote.NewStorage(log.NewNopLogger(), reg, startTime, dbDir, time.Second*30, nil, remote.Base1)
t.Cleanup(func() { t.Cleanup(func() {
require.NoError(t, rs.Close()) require.NoError(t, rs.Close())
}) })
@ -584,7 +584,7 @@ func TestLockfile(t *testing.T) {
tsdbutil.TestDirLockerUsage(t, func(t *testing.T, data string, createLock bool) (*tsdbutil.DirLocker, testutil.Closer) { tsdbutil.TestDirLockerUsage(t, func(t *testing.T, data string, createLock bool) (*tsdbutil.DirLocker, testutil.Closer) {
logger := log.NewNopLogger() logger := log.NewNopLogger()
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
rs := remote.NewStorage(logger, reg, startTime, data, time.Second*30, nil, false) rs := remote.NewStorage(logger, reg, startTime, data, time.Second*30, nil, remote.Base1)
t.Cleanup(func() { t.Cleanup(func() {
require.NoError(t, rs.Close()) require.NoError(t, rs.Close())
}) })
@ -604,7 +604,7 @@ func TestLockfile(t *testing.T) {
func Test_ExistingWAL_NextRef(t *testing.T) { func Test_ExistingWAL_NextRef(t *testing.T) {
dbDir := t.TempDir() dbDir := t.TempDir()
rs := remote.NewStorage(log.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil, false) rs := remote.NewStorage(log.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil, remote.Base1)
defer func() { defer func() {
require.NoError(t, rs.Close()) require.NoError(t, rs.Close())
}() }()

View file

@ -253,8 +253,8 @@ func NewAPI(
registerer prometheus.Registerer, registerer prometheus.Registerer,
statsRenderer StatsRenderer, statsRenderer StatsRenderer,
rwEnabled bool, rwEnabled bool,
rwFormat remote.RemoteWriteFormat,
otlpEnabled bool, otlpEnabled bool,
enableRemoteWrite11 bool,
) *API { ) *API {
a := &API{ a := &API{
QueryEngine: qe, QueryEngine: qe,
@ -296,7 +296,7 @@ func NewAPI(
} }
if rwEnabled { if rwEnabled {
a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, enableRemoteWrite11) a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, rwFormat)
} }
if otlpEnabled { if otlpEnabled {
a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap) a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap)

View file

@ -459,9 +459,10 @@ func TestEndpoints(t *testing.T) {
dbDir := t.TempDir() dbDir := t.TempDir()
// TODO: test with other proto format(s)?
remote := remote.NewStorage(promlog.New(&promlogConfig), prometheus.DefaultRegisterer, func() (int64, error) { remote := remote.NewStorage(promlog.New(&promlogConfig), prometheus.DefaultRegisterer, func() (int64, error) {
return 0, nil return 0, nil
}, dbDir, 1*time.Second, nil, false) }, dbDir, 1*time.Second, nil, remote.Base1)
err = remote.ApplyConfig(&config.Config{ err = remote.ApplyConfig(&config.Config{
RemoteReadConfigs: []*config.RemoteReadConfig{ RemoteReadConfigs: []*config.RemoteReadConfig{

View file

@ -17,6 +17,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"github.com/prometheus/prometheus/storage/remote"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"net/url" "net/url"
@ -135,7 +136,7 @@ func createPrometheusAPI(q storage.SampleAndChunkQueryable) *route.Router {
nil, nil,
nil, nil,
false, false,
false, remote.Base1,
false, // Disable experimental reduce remote write proto support. false, // Disable experimental reduce remote write proto support.
) )

20984
web/ui/package-lock.json generated

File diff suppressed because it is too large Load diff

View file

@ -18,6 +18,7 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"github.com/prometheus/prometheus/storage/remote"
"io" "io"
stdlog "log" stdlog "log"
"math" "math"
@ -241,27 +242,27 @@ type Options struct {
Version *PrometheusVersion Version *PrometheusVersion
Flags map[string]string Flags map[string]string
ListenAddress string ListenAddress string
CORSOrigin *regexp.Regexp CORSOrigin *regexp.Regexp
ReadTimeout time.Duration ReadTimeout time.Duration
MaxConnections int MaxConnections int
ExternalURL *url.URL ExternalURL *url.URL
RoutePrefix string RoutePrefix string
UseLocalAssets bool UseLocalAssets bool
UserAssetsPath string UserAssetsPath string
ConsoleTemplatesPath string ConsoleTemplatesPath string
ConsoleLibrariesPath string ConsoleLibrariesPath string
EnableLifecycle bool EnableLifecycle bool
EnableAdminAPI bool EnableAdminAPI bool
PageTitle string PageTitle string
RemoteReadSampleLimit int RemoteReadSampleLimit int
RemoteReadConcurrencyLimit int RemoteReadConcurrencyLimit int
RemoteReadBytesInFrame int RemoteReadBytesInFrame int
EnableRemoteWriteReceiver bool EnableRemoteWriteReceiver bool
EnableOTLPWriteReceiver bool EnableOTLPWriteReceiver bool
IsAgent bool IsAgent bool
AppName string AppName string
EnableReceiverRemoteWrite11 bool RemoteWriteFormat remote.RemoteWriteFormat
Gatherer prometheus.Gatherer Gatherer prometheus.Gatherer
Registerer prometheus.Registerer Registerer prometheus.Registerer
@ -322,6 +323,7 @@ func New(logger log.Logger, o *Options) *Handler {
app = h.storage app = h.storage
} }
fmt.Println("rw format for handler is: ", o.RemoteWriteFormat)
h.apiV1 = api_v1.NewAPI(h.queryEngine, h.storage, app, h.exemplarStorage, factorySPr, factoryTr, factoryAr, h.apiV1 = api_v1.NewAPI(h.queryEngine, h.storage, app, h.exemplarStorage, factorySPr, factoryTr, factoryAr,
func() config.Config { func() config.Config {
h.mtx.RLock() h.mtx.RLock()
@ -351,8 +353,8 @@ func New(logger log.Logger, o *Options) *Handler {
o.Registerer, o.Registerer,
nil, nil,
o.EnableRemoteWriteReceiver, o.EnableRemoteWriteReceiver,
o.RemoteWriteFormat,
o.EnableOTLPWriteReceiver, o.EnableOTLPWriteReceiver,
o.EnableReceiverRemoteWrite11,
) )
if o.RoutePrefix != "/" { if o.RoutePrefix != "/" {